@booklet {deanpfoster-bfatog2021,
title = {Calibeating: Beating Forecasters at Their Own Game},
journal = {Discussion Papers},
number = {743},
year = {2021},
month = oct,
abstract = {Forecasters should be tested by the Brier score and not just by the calibration score, which can always be made arbitrarily small. The Brier score is the sum of the calibration score and the refinement score; the latter measures how good the sorting into bins with the same forecast is, and thus attests to expertise. This raises the question of whether one can gain calibration without losing expertise, which we refer to as calibeating. We provide an easy way to calibeat any forecast, by a deterministic online procedure. We moreover show that calibeating can be achieved by a stochastic procedure that is itself calibrated, and then extend the results to simultaneously calibeating multiple procedures, and to deterministic procedures that are continuously calibrated.},
url = {http://www.ma.huji.ac.il/hart/publ.html\#calib-beat},
author = {Foster, Dean P. and Hart, Sergiu}
}
@booklet {hart-cftmp2021,
title = {Calibrated Forecasts: The Minimax Proof},
journal = {Discussion Papers},
number = {744},
year = {2021},
month = nov,
abstract = {A formal write-up of the simple proof (1995) of the existence of calibrated forecasts by the minimax theorem, which moreover shows that N^3 periods suffice to guarantee a 1/N calibration error.},
url = {http://www.ma.huji.ac.il/hart/publ.html\#calib-minmax},
author = {Sergiu Hart}
}
@booklet {elonkohlberg-dtmotc2021,
title = {Demystifying the Math of the Coronavirus},
journal = {Discussion Papers},
number = {741},
year = {2021},
month = mar,
abstract = {We provide an elementary mathematical description of the spread of the coronavirus. We explain two fundamental relationships: How the rate of growth in new infections is determined by the effective reproductive number; and how the effective reproductive number is affected by social distancing. By making a key approximation, we are able to formulate these relationships very simply and thereby avoid complicated mathematics. The same approximation leads to an elementary method for estimating the effective reproductive number.},
url = {/files/dp741.pdf},
author = {Kohlberg, Elon and Neyman, Abraham}
}
@booklet {constantinesorokin-pidica2021,
title = {Pure Information Design in Classic Auctions},
journal = {Discussion Papers},
number = {742},
year = {2021},
month = jul,
abstract = {In many auction environments sellers are better informed about bidders{\textquoteright} valuations than the bidders themselves. For such environments we derive a sharp and general optimal policy of information transmission in the case of independent private values. Under this policy bidders whose (ex-post) valuation is below a certain threshold are provided with all the information (about their valuations), but those bidders whose valuation lies below the threshold receive no information whatsoever. Surprisingly, the threshold expressed in percentiles is independent of the probability distribution over bidders{\textquoteright} ex-post valuations; it depends solely on the number of bidders. Similar results are also derived for the bidder-optimal policy. Our analysis builds on the approach of Bayesian persuasion and on a linearity of sellers{\textquoteright} revenues as a function of the inverse distribution. This latter property allows us to use important results on stochastic comparisons.},
url = {/files/dp742.pdf},
author = {Sorokin, Constantine and Winter, Eyal}
}
@booklet {toddrkaplan-otsuosiipfa2021,
title = {On the Strategic Use of Seller Information in Private-Value First-Price Auctions},
journal = {Discussion Papers},
number = {745},
year = {2021},
month = dec,
abstract = {In the framework of a first-price private-value auction, we consider the seller as a player in a game with the buyers in which he has private information about their realized values. We ask whether the seller can benefit by using his private information strategically. We find that in fact, depending upon his information, set of signals, and commitment power, the seller may indeed increase his revenue by strategic transmission of his information. We study mainly the case of partial truthful commitment (VC) in which the seller can commit to send only truthful (verifiable) messages. We show that in the case of two buyers with values distributed independently uniformly on [0,1], a seller informed of the private values of the buyers, can achieve a revenue close to 1/2 by sending verifiable messages (compared to 1/3 in the standard auction), and this is the largest revenue that he can reach with any signaling strategy and any level of commitment. The case studied here provides valuable insight into the issue of strategic use of information which applies more generally.},
url = {/files/dp745.pdf},
author = {Kaplan, Todd R. and Zamir, Shmuel}
}
@booklet {bar-hillel-saac2021,
title = {Stumpers: An Annotated Compendium},
journal = {Discussion Papers},
number = {737},
year = {2021},
month = jan,
abstract = {A stumper is a riddle whose solution is typically so elusive that it does not come to mind, at least initially - leaving the responder stumped. Stumpers work by eliciting a (typically visual) representation of the narrative, in which the solution is not to be found. In order to solve the stumper, the blocking representation must be changed, which does not happen to most respondents. I have collected all the riddles I know at this time that qualify, in my opinion, as stumpers. I have composed a few, and tested many. Whenever rates of correct solutions were available, they are included, giving a rough proxy for difficulty},
url = {/files/dp737.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {aumann-wc2021,
title = {Why Consciousness?},
journal = {Discussion Papers},
number = {740},
year = {2021},
month = may,
abstract = {Emotions---especially desire and the objects of desire, like enjoyment and satisfaction---drive much of what we do; indeed they drive all we do that is not recurrent. They are thus indispensable to human life. Inter alia, emotions enable the operation of incentives like hunger for eating that motivate us to perform tasks that are vital to our lives. We suggest that the adaptive function of consciousness is to enable emotions to operate.},
url = {/files/dp740.pdf},
author = {Robert J. Aumann}
}
@booklet {alexanderkravtsov-aaatstm2020,
title = {An Axiomatic Approach to Sensors Trust Measurements},
journal = {Discussion Papers},
number = {739},
year = {2020},
month = aug,
abstract = {A set of sensors is used to identify which of the users, from a pre-specified set of users, is currently using a device. Each sensor provides a name of a user and a real number representing its level of confidence in the assessment. However, the sensors measure different signals for different traits that are largely unrelated. To be able to implement a policy based on these measurements, one needs to aggregate the information provided by all sensors. We use an axiomatic approach to provide several reasonable trust functions. We show that by providing a few desirable properties we can derive several solutions that are characterized by these properties. Our analysis makes use of an important result by Kolmogorov (1930).},
url = {/files/dp739.pdf},
author = {Kravtsov, Alexander and Winter, Eyal}
}
@booklet {danielkahneman-claci2020,
title = {Comment: Laplace and Cognitive Illusions},
journal = {Discussion Papers},
number = {735},
year = {2020},
month = jun,
note = {Statistical Science, Vol. 35, No. 2, 2020, pp. 171--172},
abstract = {Reports in the 1970s of cognitive illusions in judgments of uncertainty had been anticipated by Laplace 150 years earlier. We discuss Miller and Gelman{\textquoteright}s remark that Laplace{\textquoteright}s anticipation of the main ideas of the heuristics and biases approach "gives us a new perspective on these ideas as more universal and less contingent on particular developments [that came much] later."},
url = {/files/dp735.pdf},
author = {Kahneman, Daniel and Bar-Hillel, Maya}
}
@booklet {alexgershkov-eps2020,
title = {Exploitative Priority Service},
journal = {Discussion Papers},
number = {738},
year = {2020},
month = aug,
abstract = {We analyze the implications of introducing priority service on customers{\textquoteright} welfare. In monopoly markets, introducing priority service decreases the customers{\textquoteright} surplus despite increasing the assignment efficiency: the monopolist extracts from customers a total payment higher than the total efficiency gain generated by the service and hence leaves customers worse off compared with the situation where no priority is offered at all. In duopoly markets with homogeneous customers the equilibrium price and customers{\textquoteright} welfare coincide with the monopoly outcome where this monopolist faces half of the market. With heterogeneous customers as well priority reduces the aggregated consumers{\textquoteright} welfare. Our conclusion is that priority service erects barriers to competition that are embedded in the nature of the service provided, with the victims of these barriers primarily being agents with low willingness or low ability to pay for the priority.},
url = {/files/dp738.pdf},
author = {Gershkov, Alex and Winter, Eyal}
}
@booklet {yigalattali-tfaofl2020,
title = {The False Allure of Fast Lures},
journal = {Discussion Papers},
number = {733},
year = {2020},
month = feb,
note = {Judgment and Decision Making, Vol. 15, No. 1, January 2020, pp. 93--111},
abstract = {The Cognitive Reflection Test (CRT) allegedly measures the tendency to override the prepotent incorrect answers to some special problems, and to engage in further reflection. A growing literature suggests that the CRT is a powerful predictor of performance in a wide range of tasks. This research has mostly glossed over the fact that the CRT is composed of math problems. The purpose of this paper is to investigate whether numerical CRT items do indeed call upon more than is required by standard math problems, and whether the latter predict performance in other tasks as well as the CRT. In Study 1 we selected from a bank of standard math problems items that, like CRT items, have a fast lure, as well as others which do not. A 1-factor model was the best supported measurement model for the underlying abilities required by all three item types. Moreover, the quality of all these items---CRT and math problems alike---as predictors of performance on a set of choice and reasoning tasks did not depend on whether or not they had a fast lure, but rather only on their quality as math items. In other words, CRT items seem not to be a special category of math problems, although they are quite excellent ones. Study 2 replicated these results with a different population and a different set of math problems.},
url = {/files/dp733.pdf},
author = {Attali, Yigal and Bar-Hillel, Maya}
}
@booklet {zak-fcpduwpamcos2020,
title = {Female Chess Players Do Underperform When Playing Against Men: Commentary on Stafford (2018)},
journal = {Discussion Papers},
number = {734},
year = {2020},
month = mar,
abstract = {Stafford (2018) found that female chess players outperform expectations when playing against men, in a study of data from over 5.5 million official games around the world. I examined whether that result could stem from not controlling for the ages of both players, as female players tend to be much younger than male players. Using the same data as Stafford, I was able to replicate his main result only when the opponent{\textquoteright}s age was ignored. When the ages of both players were included in the analysis, the gender-composition effect was reversed. Further analyses using other data demonstrated the robustness of this pattern, re-establishing that female chess players underperform when playing against men. Prior to Stafford{\textquoteright}s paper, the leading premise was that women encounter psychological obstacles that prevent them from performing at their normal capacity against men. My commentary continues that line of evidence and is consistent with the stereotype-threat explanation.},
url = {/files/dp734.pdf},
author = {Zak, Uri}
}
@booklet {sergiuhart-ppnlati2020,
title = {Posterior Probabilities: Nonmonotonicity, Log-Concavity, and Tur{\'a}n{\textquoteright}s Inequality},
journal = {Discussion Papers},
number = {736},
year = {2020},
month = jul,
abstract = {In the standard Bayesian framework the data are assumed to be generated by a distribution parametrized by $\theta$ in a parameter space $\Theta$, over which a prior distribution is defined. A Bayesian statistician quantifies the belief that the true parameter is $\theta_0$ in $\Theta$ by its posterior probability given the observed data. We investigate the behavior of the posterior belief in $\theta_0$ when the data are generated under some parameter $\theta_1$, which may or may not be the same as $\theta_0$. Starting from stochastic orders, specifically, likelihood ratio dominance, that obtain for resulting distributions of posteriors, we consider monotonicity properties of the posterior probabilities as a function of the sample size when data arrive sequentially. While the $\theta_0$-posterior is monotonically increasing (i.e., it is a submartingale) when the data are generated under that same $\theta_0$, it need not be monotonically decreasing in general, not even in terms of its overall expectation, when the data are generated under a different $\theta_1$; in fact, it may keep going up and down many times. In the framework of simple iid coin tosses, we show that under certain conditions the overall expected posterior of $\theta_0$ eventually becomes monotonically decreasing when the data are generated under $\theta_1 \neq \theta_0$. Moreover, we prove that when the prior is uniform this expected posterior is a log-concave function of the sample size, by developing an inequality that is related to Tur{\'a}n{\textquoteright}s inequality for Legendre polynomials.},
url = {http://www.ma.huji.ac.il/hart/abs/post-seq.html},
author = {Hart, Sergiu and Rinott, Yosef}
}
@booklet {mayabar-hillel-bbonaca2019,
title = {Baffling Bathrooms: On Navigability and Choice Architecture},
journal = {Discussion Papers},
number = {726},
year = {2019},
month = jun,
note = {Behavioral Public Policy Blog},
url = {/files/dp726.pdf},
author = {Bar-Hillel, Maya and Sunstein, Cass R.}
}
@booklet {bar-hillel-tbfipj2019,
title = {The Base-Rate Fallacy in Probability Judgments},
journal = {Discussion Papers},
number = {732},
year = {2019},
month = dec,
note = {Acta Psychologica, Vol. 44 (1980), pp. 211--233},
abstract = {The base-rate fallacy is people{\textquoteright}s tendency to ignore base rates in favor of, e.g., individuating information (when such is available), rather than integrate the two. This tendency has important implications for understanding judgment phenomena in many clinical, legal, and social-psychological settings. An explanation of this phenomenon is offered, according to which people order information by its perceived degree of relevance, and let high-relevance information dominate low-relevance information. Information is deemed more relevant when it relates more specifically to a judged target case. Specificity is achieved either by providing information on a smaller set than the overall population, of which the target case is a member, or when information can be coded, via causality, as information about the specific members of a given population. The base-rate fallacy is thus the result of pitting what seem to be merely coincidental, therefore low-relevance, base rates against more specific, or causal, information. A series of probabilistic inference problems is presented in which relevance was manipulated with the means described above, and the empirical results confirm the above account. In particular, base rates will be combined with other information when the two kinds of information are perceived as being equally relevant to the judged case.},
url = {/files/dp732.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {sergiuhart-fac2019,
title = {Forecast-Hedging and Calibration},
journal = {Discussion Papers},
number = {731},
year = {2019},
month = nov,
abstract = {Calibration means that for each forecast x the average of the realized actions in the periods in which the forecast was x is, in the long run, close to x. Calibration can always be guaranteed (Foster and Vohra 1998), but it requires the forecasting procedure to be stochastic. By contrast, smooth calibration, which combines in a continuous manner nearby forecasts, can be guaranteed by a deterministic procedure (Foster and Hart 2018). In the present paper we develop the concept of forecast-hedging, which consists of choosing the forecasts in such a way that, no matter what the realized action will be, the expected forecasting track record can only improve. This approach integrates the existing calibration results by obtaining them all from the same simple basic argument, and at the same time differentiates between them according to the forecast-hedging tools that are used: deterministic and fixed point-based vs. stochastic and minimax-based. Additional benefits are new calibration procedures in the one-dimensional case that are simpler than all known such procedures, and a short proof for deterministic smooth calibration, in contrast to the complicated existing proof.},
url = {http://www.ma.huji.ac.il/hart/abs/calib-int.html},
author = {Hart, Sergiu and Foster, Dean P.}
}
@booklet {lavee-latiopfdpnb2019,
title = {Lay Attitudes Toward Involuntary Organ Procurement from Death-Row Prisoners: No, but},
journal = {Discussion Papers},
number = {727},
year = {2019},
month = jun,
note = {Behavioural Public Policy},
abstract = {A multi-item questionnaire concerning lay people{\textquoteright}s attitudes toward organ procurement without consent from executed prisoners was given to several hundred respondents. The items ranged from all-out condemnation ("It is tantamount to murder") to enthusiasm ("It is great to have this organ supply"). Overall, we found two guiding principles upheld by most respondents: (1) Convicts have as much a right to their bodies and organs as other people, so the practice should be judged by the same standards as those that guide organ procurement from any donor. Procuring organs without consent is wrong. (2) Benefiting from those organs should be held to more lenient standards than are demanded for their procurement. So, benefitting from these ill-gotten organs should be tolerated.},
url = {/files/db727.pdf},
author = {Lavee, Jacob and Bar-Hillel, Maya}
}
@booklet {mayabar-hillel-mbtnasfsscacatar2019,
title = {Solving Stumpers, CRT and CRAT: Are the Abilities Related?},
journal = {Discussion Papers},
number = {729},
year = {2019},
month = oct,
note = {Judgment and Decision Making, Vol. 14, No. 5, September 2019, pp. 620--623},
abstract = {Bar-Hillel, Noah and Frederick (2018) studied a class of riddles they called stumpers, which have simple, but curiously elusive, solutions. A canonical example is: "Andy is Bobbie{\textquoteright}s brother, but Bobbie is not Andy{\textquoteright}s brother. How come?" Though not discussed there, we found that the ability to solve stumpers correlates significantly with performance on items resembling the CRT (Cognitive Reflection Test) but not with performance on items from the CRAT (Compound Remote Associates Test). We report those results here.},
url = {/files/db729.pdf},
author = {Bar-Hillel, Maya and Noah, Tom and Frederick, Shane}
}
@booklet {siedner-opbars2019,
title = {Optimal Pricing by a Risk-Averse Seller},
journal = {Discussion Papers},
number = {725},
year = {2019},
month = may,
abstract = {We consider the basic setup of one seller, one buyer, and one good, where the seller is risk averse, and characterize the mechanism that maximizes the seller{\textquoteright}s expected utility. In contrast to the risk-neutral case, where a single deterministic price is optimal, we show that in the risk averse case the optimal mechanism consists of a continuum of lotteries.},
url = {/files/dp725.pdf},
author = {Tomer Siedner}
}
@booklet {sergiuhart-ppdao2019,
title = {Posterior Probabilities: Dominance and Optimism},
journal = {Discussion Papers},
number = {730},
year = {2019},
month = nov,
abstract = {The Bayesian posterior probability of the true state is stochastically dominated by that same posterior under the probability law of the true state. This generalizes to notions of "optimism" about posterior probabilities.},
url = {http://www.ma.huji.ac.il/hart/abs/posterior.html},
author = {Hart, Sergiu and Rinott, Yosef}
}
@booklet {bar-hillel-wdisie2019,
title = {Why Didn{\textquoteright}t I See It Earlier?},
journal = {Discussion Papers},
number = {728},
year = {2019},
month = jul,
note = {Sternberg, Robert J., Ed. My Biggest Research Mistake: Adventures and Misadventures in Psychological Research. SAGE Publications, 2019},
url = {/files/dp728.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {neyman-avosoptottompcaro2018,
title = {Additive Valuations of Streams of Payoffs That Obey the Time-Value of Money Principle: Characterization and Robust Optimization},
journal = {Discussion Papers},
number = {718},
year = {2018},
month = apr,
abstract = {This paper characterizes the preferences over bounded infinite utility streams that satisfy the time-value of money principle and an additivity property, and preferences that in addition are impatient. Based on this characterization, the paper introduces a concept of optimization that is robust to a small imprecision in the specification of the preference, and proves that the set of feasible streams of payoffs of a finite Markov decision process admits such a robust optimization.},
url = {/files/dp718.pdf},
author = {Abraham Neyman}
}
@booklet {kristofferarnsfelthansen-tbmwacaabom2018,
title = {Big Match with a Clock and a Bit of Memory, The},
journal = {Discussion Papers},
number = {716},
year = {2018},
month = feb,
abstract = {The Big Match is a multi-stage two-player game. In each stage Player 1 hides one or two pebbles in his hand, and his opponent has to guess that number; Player 1 loses a point if Player 2 is correct, and otherwise he wins a point. As soon as Player 1 hides one pebble, the players cannot change their choices in any future stage. Blackwell and Ferguson (1968) give an $\varepsilon$-optimal strategy for Player 1 that hides, in each stage, one pebble with a probability that depends on the entire past history. Any strategy that depends just on the clock or on a finite memory is worthless. The long-standing natural open problem has been whether every strategy that depends just on the clock and a finite memory is worthless. The present paper proves that there is such a strategy that is $\varepsilon$-optimal. In fact, we show that just two states of memory are sufficient.},
url = {/files/dp716.pdf},
author = {Hansen, Kristoffer Arnsfelt and Ibsen-Jensen, Rasmus and Neyman, Abraham}
}
@booklet {orlibobek-eonmconsitlkn2018,
title = {Effect of Nest-Site Microclimatic Conditions on Nesting Success in the Lesser Kestrel ({Falco naumanni})},
journal = {Discussion Papers},
number = {721},
year = {2018},
month = jul,
abstract = {Capsule: Microclimatic conditions in the nest of the Lesser Kestrel (Falco naumanni), particularly the percentage of time of extremely low humidity, affect breeding success. Aim: (1) To study the effect of within-nest temperature and humidity on nest productivity, and the correlation between nest productivity and the order of dates on which nests were occupied by the parents. (2) To compare microclimatic conditions in the nest, breeding success and order of occupation between nests under tile roofs and artificial nest boxes. Methods: Three different Lesser Kestrel colonies in Israel---one rural, one urban and one in an open country habitat. Data loggers, that measure temperature and humidity, were put in 39 nests for the entire breeding period. The number of fledglings was recorded for each nest, as well as the date of occupation. Results: (1) Full microclimatic data from 35 nests suggest that percentage of time of extremely low humidity is the major predictor of nest productivity. (2) The urban colony had the lowest breeding success of the three colonies. (3) Sites of more successful nests were occupied earlier. (4) No significant difference in mean productivity between nests in roofs and nest boxes, but nests in roofs were occupied earlier. Conclusion: Nest microclimate affects nesting success in addition to colony location.},
url = {/files/dp721.pdf},
author = {Bobek, Orli and Gal, Adiv and Saltz, David and Motro, Uzi}
}
@booklet {adivgal-eosfonsitlkn2018,
title = {Effect of Supplemental Feeding on Nesting Success in the Lesser Kestrel ({Falco naumanni})},
journal = {Discussion Papers},
number = {723},
year = {2018},
month = sep,
abstract = {The effect of food supplement to Lesser Kestrel (Falco naumanni) nests during the nestling period (from hatching to fledging) was studied in two nesting colonies in Israel - Alona and Jerusalem. Our hypothesis was that food supplement will have a greater effect on fledgling success in the food-limited, urban colony of Jerusalem, than in the rural colony of Alona. Indeed, food supplement had a significantly positive effect on breeding success in both colonies. However, and contrary to our prediction, the decrease in chick mortality between supplemented and control nests in Jerusalem was actually smaller than in Alona. This implies either that additional factors, possibly urbanization associated, other than food limitation, might be responsible for the difference in nesting success of Lesser Kestrels between Alona and Jerusalem, and/or that the amount of additional food provided to supplemented nests (three mice per chick per week), was not enough.},
url = {/files/dp723.pdf},
author = {Gal, Adiv and Saltz, David and Motro, Uzi}
}
@booklet {bezalelpeleg-jabasmp2018,
title = {Judgements Aggregation by a Sequential Majority Procedure},
journal = {Discussion Papers},
number = {719},
year = {2018},
month = jun,
abstract = {We consider a standard model of judgment aggregation as presented, for example, in Dietrich (2015). For this model we introduce a sequential majority procedure (SMP) which uses the majority rule as much as possible. The ordering of the issues is assumed to be exogenous. The definition of SMP is given in Section 2. In Section 4 we construct an intuitive relevance relation for our model, closely related to conditional entailment, for our model. While in Dietrich (2015), the relevance relation is given exogenously as part of the model, we insist that the relevance relation be derived from the agenda. We prove that SMP has the property of independence of irrelevant issues (III) with respect to (the transitive closure of) our relevance relation. As III is weaker than the property of proposition-wise independence (PI) we do not run into impossibility results as does List (2004) who incorporates PI in some parts of his analysis. We proceed to characterize SMP by anonymity, restricted monotonicity, limited neutrality, restricted agenda property, and independence of past deliberations (see Section 3 for the precise details). SMP inherits the first three axioms from the Majority Rule. The axiom of restricted agenda property guarantees sequentiality. The most important axiom, independence of past deliberations (IPD), says that the choice at time (t+1) depends only on the choices in dates 1, ..., t and the judgments at (t +1) (and not on the judgments in dates 1, ..., t) . Also, we use this occasion to point out that Roberts (1991) characterization of choice by plurality voting may be adapted to our model.},
url = {/files/db719.pdf},
author = {Peleg, Bezalel and Zamir, Shmuel}
}
@booklet {mayabar-hillel-lpfrtcos2018,
title = {Learning Psychology from Riddles: The Case of Stumpers},
journal = {Discussion Papers},
number = {714},
year = {2018},
month = feb,
note = {Judgment and Decision Making 13.1 (2018): 112},
abstract = {Riddles can teach us psychology when we stop to consider the psychological principles that make them work. This paper studies a particular class of riddles that we call stumpers, and provides analysis of the various principles (some familiar, some novel) that inhibit most people from finding the correct solution---or any solution---even though they find the answers obvious ex post. We restrict our analysis to four stumpers, propose the psychological antecedents of each, and provide experimental support for our conjectures.},
url = {/files/db714.pdf},
author = {Bar-Hillel, Maya and Noah, Tom and Frederick, Shane}
}
@booklet {shevywaner-mmciascws2018,
title = {Male Mate Choice in a Sexually Cannibalistic Widow Spider},
journal = {Discussion Papers},
number = {713},
year = {2018},
month = feb,
abstract = {Males of the brown widow spider, Latrodectus geometricus (Theridiidae), invest energy in courtship displays and are often cannibalized after mating; accordingly, partial sex role reversal is expected. In this species, subadult females are able to mate and produce viable offspring. In contrast to mature females, these subadult females do not cannibalize their mates after copulation. Nevertheless, when given a choice, males preferred mature over subadult females and older over young mature females. We found no benefit for males in mating with the females of their choice. Older females were significantly less fecund than young mature females, and were not more fecund than subadult females. We tested possible advantages in mating with cannibalistic (mature) females, such as an increased probability of plugging the female{\textquoteright}s genital duct or longer copulations, or disadvantages in mating with subadult females, such as higher remating risk. None of these explanations was supported. Thus, we lack an adaptive explanation for male preference for mature older females. We suggest that older females produce more pheromone to attract males and that males are thus misled into mating with older, more aggressive and less fecund females.},
url = {/files/db713.pdf},
author = {Waner, Shevy and Motro, Uzi and Lubin, Yael and Harari, Ally R.}
}
@booklet {bezalelpeleg-roppsbssgfas2018,
title = {Representations of Political Power Structures by Strategically Stable Game Forms: A Survey},
journal = {Discussion Papers},
number = {715},
year = {2018},
month = feb,
abstract = {We survey the results on representations of committees and constitutions by game forms that possess some kind of equilibrium strategies for each profile of preferences of the players. The survey is restricted to discrete models, that is, we deal with finitely many players and alternatives. No prior knowledge of social choice is assumed: As far as definitions are concerned, the paper is self-contained. Section 2 supplies the necessary general tools for the rest of the paper. Each definition is followed by a simple (but nontrivial) example. In Section 3 we give a complete account of representations of committees (proper and monotonic simple games), by exactly and strongly consistent social choice functions. We start with Peleg{\textquoteright}s representations of weak games, and then provide a complete and detailed account of Holzman{\textquoteright}s solution of the representation problem for simple games without veto players. In Section 4 we deal with representations of constitutions by game forms. Following G{\"a}rdenfors we model a constitution by a monotonic and super additive effectivity function. We fully characterize the representations for three kinds of equilibrium: Nash equilibrium; acceptable equilibrium (Pareto optimal Nash equilibrium); and strong Nash equilibrium. We conclude in Section 5 with a report on two recent works on representations of constitutions under incomplete information.},
url = {/files/dp715.pdf},
author = {Peleg, Bezalel and Holzman, Ron}
}
@booklet {bezalelpeleg-sosccise2018,
title = {Self-Implementation of Social Choice Correspondences in Strong Equilibrium},
journal = {Discussion Papers},
number = {717},
year = {2018},
month = {4},
abstract = {A social choice correspondence is self-implementable in strong equilibrium if it is implementable in strong equilibrium by a social choice function selecting from the correspondence itself as a game form. We characterize all social choice correspondences implementable this way by an anonymous social choice function satisfying no veto power, given that the number of agents is large relative to the number of alternatives. It turns out that these are exactly the social choice correspondences resulting from feasible elimination procedures as introduced in Peleg (1978).},
url = {/files/dp717.pdf},
author = {Bezalel Peleg and Hans Peters}
}
@booklet {lotemelber-dorozko-sanr2018,
title = {Striatal Action-Value Neurons Reconsidered},
journal = {Discussion Papers},
number = {720},
year = {2018},
month = {7},
abstract = {It is generally believed that during economic decisions, striatal neurons represent the values associated with different actions. This hypothesis is based on studies, in which the activity of striatal neurons was measured while the subject was learning to prefer the more rewarding action. Here we show that these publications are subject to at least one of two critical confounds. First, we show that even weak temporal correlations in the neuronal data may result in an erroneous identification of action-value representations. Second, we show that experiments and analyses designed to dissociate action-value representation from the representation of other decision variables cannot do so. We suggest solutions to identifying action-value representation that are not subject to these confounds. Applying one solution to previously identified action-value neurons in the basal ganglia we fail to detect action-value representations. We conclude that the claim that striatal neurons encode action-values must await new experiments and analyses.},
url = {/files/dp720.pdf},
author = {Lotem Elber-Dorozko and Yonatan Loewenstein}
}
@booklet {oneill-taacs2018,
title = {Two-Party Agreements as Circular Sets},
journal = {Discussion Papers},
number = {722},
year = {2018},
month = {9},
abstract = {In making an agreement with someone, I conditionally promise to perform a certain action, conditioning my obligation on their both making a corresponding promise and performing their action. What promise should I require? That they simply commit to perform is not enough. I should demand the kind of promise I am making myself, and they should demand the same of me. This makes our promises indirectly self-referential. Assuming the performance actions are specified, my promise can be characterized as a set of available promises, all those the other could make to activate my obligation. We have an agreement if each one s promise is a member of the other s promise. Assume that the set P of available promises satisfies (1) Aczel s axiom for circular sets; (2) transitivity: if the obligation of $p {\textquoteright}in P$ is activated by $p $, then $p {\textquoteright}in P$; and (3) superset closure: if $p {\textquoteright}in P$ is activated by $p $, $p$ is activated by any promise that implies (is a superset of) $p $. The focus is on bargaining procedures that treat the parties symmetrically (e.g., no specified offerer or accepter.) Each party chooses an agreement promise $p*$ such that (4) if both make $p*$ and one performs, the other is obligated to perform; (5) if one makes $p*$ and the other does not, the former is not unilaterally obligated. It is shown that among available promise sets of a given size, exactly one contains an agreement promise and contains exactly one of them.},
url = {/files/dp722.pdf},
author = {Barry O{\textquoteright}Neill}
}
@booklet {bar-hillel-tulosmc2018,
title = {Unbearable Lightness of Self-Induced Mind Corruption, The},
journal = {Discussion Papers},
number = {724},
year = {2018},
month = {11},
abstract = {Talk delivered at the opening of SPUDM 26, Haifa, Israel, August 2017.},
url = {/files/dp724.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {ohaddan-aocnowiroscd2017,
title = {Association of Catastrophic Neonatal Outcomes with Increased Rate of Subsequent Cesarean Deliveries},
journal = {Discussion Papers},
number = {707},
year = {2017},
month = {4},
abstract = {Objective: To evaluate whether full-term deliveries resulting in neonates diagnosed with hypoxic-ischemic encephalopathy are associated with a significant increase in the rate of subsequent unscheduled cesarean deliveries. Methods: We conducted a retrospective chart review study and examined all deliveries in the department of Obstetrics and Gynecology at Hadassah University Hospital, Mt. Scopus campus, Jerusalem, Israel during 2009-2014. We reviewed all cases of hypoxic-ischemic encephalopathy in singleton, term, liveborn deliveries and identified seven such cases: three of which were attributed to obstetric mismanagement and four which were not. We measured the rate of unscheduled cesarean deliveries before and after the events and their respective hazard ratio (HR). Results: Prior to a mismanaged delivery resulting in hypoxic-ischemic encephalopathy, the baseline rate of unscheduled cesarean deliveries was approximately 80 unscheduled cesarean deliveries for every 1,000 deliveries. In the first 4 weeks immediately after each of the three identified cases, there was a significant increase in the rate of unscheduled cesarean deliveries by an additional 48 unscheduled cesarean deliveries per 1,000 deliveries (95\% CI 27-70/1,000). This increase was transient and lasted approximately 4 weeks. We estimated that each case was associated with approximately 17 additional unscheduled cesarean deliveries (95\% confidence interval 8-27). There was no increase in the rate of unscheduled cesarean deliveries in cases of hypoxic-ischemic encephalopathy that were not associated with mismanagement. Conclusion: The increase in the rate of unscheduled cesarean deliveries after a catastrophic neonatal outcome may result in short-term changes in obstetricians risk evaluation.},
url = {/files/dp707.pdf},
author = {Ohad Dan and Drorith Hochner-Celnikier and Amy Solnica and Yonatan Loewenstein}
}
@booklet {sergiuhart-tbhoss2017,
title = {Better Half of Selling Separately, The},
journal = {Discussion Papers},
number = {712},
year = {2017},
month = {12},
abstract = {Separate selling of two independent goods is shown to yield at least 62\% of the optimal revenue, and at least 73\% when the goods satisfy the Myerson regularity condition. This improves the 50\% result of Hart and Nisan (2017, originally circulated in 2012).},
url = {http://www.ma.huji.ac.il/hart/abs/srev.html},
author = {Sergiu Hart and Philip J. Reny}
}
@booklet {abrahamneyman-csg2017,
title = {Cooperative Strategic Games},
journal = {Discussion Papers},
number = {706},
year = {2017},
month = {2},
abstract = {We examine a solution concept, called the value, for n-person strategic games. In applications, the value provides an a-priori assessment of the monetary worth of a player s position in a strategic game, comprising not only the player s contribution to the total payoff but also the player s ability to inflict losses on other players. A salient feature is that the value takes account of the costs that spoilers impose on themselves. Our main result is an axiomatic characterization of the value. For every subset, S, consider the zero-sum game played between S and its complement, where the players in each of these sets collaborate as a single player, and where the payoff is the difference between the sum of the payoffs to the players in S and the sum of payoffs to the players not in S. We say that S has an effective threat if the minmax value of this game is positive. The first axiom is that if no subset of players has an effective threat then all players are allocated the same amount. The second axiom is that if the overall payoff to the players in a game is the sum of their payoffs in two unrelated games then the overall value is the sum of the values in these two games.},
url = {/files/dp706.pdf},
author = {Abraham Neyman and Elon Kohlberg}
}
@booklet {elonkohlberg-got2017,
title = {Games of Threats},
journal = {Discussion Papers},
number = {710},
year = {2017},
month = {9},
abstract = {A game of threats on a finite set of players, $N$, is a function $d$ that assigns a real number to any coalition, $S \subseteq N$, such that $d\left( S \right) = - d\left( N \setminus S \right)$. A game of threats is not necessarily a coalitional game as it may fail to satisfy the condition $d\left( \emptyset \right) = 0$. We show that analogs of the classic Shapley axioms for coalitional games determine a unique value for games of threats. This value assigns to each player an average of the threat powers, $d\left( S \right)$, of the coalitions that include the player.},
url = {/files/dp710.pdf},
author = {Elon Kohlberg and Abraham Neyman}
}
@booklet {shiracohen-zimerman-immtbgyiefooa2017,
title = {Implicit Motivation Makes the Brain Grow Younger: Improving Executive Functions of Older Adults},
journal = {Discussion Papers},
number = {705},
year = {2017},
month = {1},
abstract = {The dominant view of cognitive aging holds that while controlled processes (e.g., working memory and executive functions) decline with age, implicit (automatic) processes do not. In this paper we challenge this view by arguing that high-level automatic processes (e.g., implicit motivation) decline with age, and that this decline plays an important and as yet unappreciated role in cognitive aging. Specifically, we hypothesized that due to their decline, high-level automatic processes are less likely to be spontaneously activated in old age, and so their subtle, external activation should have stronger effects on older (vs. younger) adults. In two experiments we used different methods of implicitly activating motivation, and measured executive functions of younger and older adults via the Wisconsin Card Sorting Test. In Experiment 1 we used goal priming to subtly increase achievement motivation. In Experiment 2 motivation was manipulated by subtly increasing engagement in the task. More specifically, we introduce the Jerusalem Face Sorting Test (JFST), a modified version of the WCST that uses cards with faces instead of geometric shapes. In both experiments, implicitly induced changes in motivation improved older- but not younger- adults executive functioning. The framework we propose is general, and it has implications as to how we view and test cognitive functions. Our case study of older adults offers a new look at various aspects of cognitive aging. Applications of this view to other special populations (e.g., ADHD, schizophrenia) and possible interventions are discussed.},
url = {/files/dp705.pdf},
author = {Shira Cohen-Zimerman and Ran R. Hassin}
}
@booklet {hart-rvtmlmptv2017,
title = {Repeat Voting: Two-Vote May Lead More People To Vote},
journal = {Discussion Papers},
number = {711},
year = {2017},
month = {10},
abstract = {A repeat voting procedure is proposed, whereby voting is carried out in two identical rounds. Every voter can vote in each round, the results of the first round are made public before the second round, and the final result is determined by adding up all the votes in both rounds. It is argued that this simple modification of election procedures may well increase voter participation and result in more accurate and representative outcomes.},
url = {http://www.ma.huji.ac.il/hart/abs/2vote.html},
author = {Sergiu Hart}
}
@booklet {bezalelpeleg-saoj2017,
title = {Sequential Aggregation of Judgments},
journal = {Discussion Papers},
number = {708},
year = {2017},
month = {5},
abstract = {We consider a standard model of judgment aggregation as presented, for example, in Dietrich (2015). For this model we introduce a sequential aggregation procedure (SAP) which uses the majority rule as much as possible. The ordering of the issues is assumed to be exogenous. The exact definition of SAP is given in Section 3. In Section 4 we construct an intuitive relevance relation for our model, closely related to conditional entailment. Unlike Dietrich (2015), where the relevance relation is given exogenously as part of the model, we require that the relevance relation be derived from the agenda. We prove that SAP has the property of independence of irrelevant issues (III) with respect to (the transitive closure of) our relevance relation. As III is weaker than the property of proposition-wise independence (PI) we do not run into impossibility results as does List (2004) who incorporates PI in some parts of his analysis. We proceed to characterize SAP by anonymity, restricted monotonicity, local neutrality, restricted agenda property, and independence of past deliberations (see Section 5 for the precise details). Also, we use this occasion to show that Roberts s (1991) characterization of choice by plurality voting can be adapted to our model.},
url = {/files/dp708.pdf},
author = {Bezalel Peleg and Shmuel Zamir}
}
@booklet {johannesmller-trede-twocimot2017,
title = {Wisdom of Crowds in Matters of Taste, The},
journal = {Discussion Papers},
number = {709},
year = {2017},
month = {6},
abstract = {Decision makers can often improve the accuracy of their judgments on factual matters by consulting {\textquoteright}crowds of others for their respective opinions. In this article, we investigate whether decision makers could similarly draw on crowds to improve the accuracy of their judgments about their own tastes and hedonic experiences. We present a theoretical model which states that accuracy gains from consulting a crowds judgments of taste depend on the interplay among taste discrimination, crowd diversity, and the similarity between the crowd s preferences and those of the decision maker. The model also delineates the boundary conditions for such {\textquoteright}crowd wisdom. Evidence supporting our hypotheses was found in two laboratory studies in which decision makers made judgments about their own enjoyment of musical pieces and short films. Our findings suggest that, although different people may have different preferences and inclinations, their judgments of taste can benefit from the wisdom of crowds.},
url = {/files/dp709.pdf},
author = {Johannes M{\"u}ller-Trede and Shoham Choshen-Hillel and Meir Barneron and Ilan Yaniv}
}
@booklet {nehama-agwaptutmdm2016,
title = {Analyzing Games with Ambiguous Player Types Using the MINthenMAX Decision Model},
journal = {Discussion Papers},
number = {700},
year = {2016},
month = {8},
abstract = {In many common interactive scenarios, participants lack information about other participants, and specifically about the preferences of other participants. In this work, we model an extreme case of incomplete information, which we term games with type ambiguity, where a participant lacks even information enabling him to form a belief on the preferences of others. Under type ambiguity, one cannot analyze the scenario using the commonly used Bayesian framework, and therefore one needs to model the participants using a different decision model. To this end, we present the MINthenMAX decision model under ambiguity. This model is a refinement of Wald s MiniMax principle, which we show to be too coarse for games with type ambiguity. We characterize MINthenMAX as the finest refinement of the MiniMax principle that satisfies three properties we claim are necessary for games with type ambiguity. This prior-less approach we present here also follows the common practice in computer science of worst-case analysis. Finally, we define and analyze the corresponding equilibrium concept, when all players follow MINthenMAX. We demonstrate this equilibrium by applying it to two common economic scenarios: coordination games and bilateral trade. We show that in both scenarios, an equilibrium in pure strategies always exists, and we analyze the equilibria.},
url = {/files/dp700.pdf},
author = {Ilan Nehama}
}
@booklet {procaccia-cbor2016,
title = {Corporate Bill of Rights},
journal = {Discussion Papers},
number = {698},
year = {2016},
month = {3},
abstract = {Abstract: Corporate entities enjoy legal subjectivity in a variety of forms, but they are not human beings. This paper explores, from a normative point of view, one of the limits that ought to be imposed on the capacity of corporations to be treated "as if" they had a human nature, their recognition as legitimate bearers of basic human rights. The assertion that corporations, like living persons, are entitled to constitutional protection was famously brought to the fore by a number of recent Supreme Court cases, most notably the Citizens United and the Hobby Lobby cases. In the rational choice analysis that follows this paper reveals that the new jurisprudence emanating from Citizens United may be justified in the relatively insignificant cases of small companies with egalitarian distribution of shares, but ought to be rejected in the more meaningful cases of large public corporations with controlling stockholders. The ruling in Hobby Lobby, on the other hand, can be defended regardless of the size of the corporation or the composition of its owners. In both of these cases it is not the rights of the corporate entity which is truly at stake and the final outcome ought to hinge on the constitutional rights of real human beings.},
url = {/files/dp698.pdf},
author = {Uriel Procaccia}
}
@booklet {urielprocaccia-ccapb2016,
title = {Corporate Crime and Plea Bargains},
journal = {Discussion Papers},
number = {697},
year = {2016},
month = {3},
abstract = {Corporate entities enjoy legal subjectivity in a variety of forms, but they are not human beings, and hence their legal capacity to bear rights and obligations of their own is not universal. This paper explores, from a normative point of view, one of the limits that ought to be set on the capacity of corporations to act "as if" they had a human nature, their capacity to commit crime. Accepted wisdom has it that corporate criminal liability is justified as a measure to deter criminal behavior. Our analysis supports this intuition in one subset of cases, but also reveals that deterrence might in fact be undermined in another subset of cases, especially in an environment saturated with plea bargains involving serious violations of the law.},
url = {/files/dp697.pdf},
author = {Uriel Procaccia and Eyal Winter}
}
@booklet {elchananben-porath-dac2016,
title = {Disclosure and Choice},
journal = {Discussion Papers},
year = {2016},
month = {2},
abstract = {An agent chooses among projects with random outcomes. His payoff is increasing in the outcome and in an observer{\textquoteright}s expectation of the outcome. With some probability, the agent can disclose the true outcome to the observer. We show that choice is inefficient: the agent favors riskier projects even with lower expected returns. If information can be disclosed by a challenger who prefers lower beliefs of the observer, the chosen project is excessively risky when the agent has better access to information, excessively riskaverse when the challenger has better access, and efficient otherwise. We also characterize the agent{\textquoteright}s worst-case equilibrium payoff.},
url = {/files/dp694.pdf},
author = {Elchanan Ben-Porath and Eddie Dekel and Barton L. Lipman}
}
@booklet {ben-porath-dac2016,
title = {Disclosure and Choice},
journal = {Discussion Papers},
number = {694},
year = {2016},
month = {2},
abstract = {An agent chooses among projects with random outcomes. His payoff is increasing in the outcome and in an observer{\textquoteright}s expectation of the outcome. With some probability, the agent can disclose the true outcome to the observer. We show that choice is inefficient: the agent favors riskier projects even with lower expected returns. If information can be disclosed by a challenger who prefers lower beliefs of the observer, the chosen project is excessively risky when the agent has better access to information, excessively riskaverse when the challenger has better access, and efficient otherwise. We also characterize the agent{\textquoteright}s worst-case equilibrium payoff.},
url = {/files/dp694.pdf},
author = {Elchanan Ben-Porath}
}
@booklet {navon-essorgatforp2016,
title = {Evolutionarily Stable Strategies of Random Games and the Facets of Random Polytopes},
journal = {Discussion Papers},
number = {702},
year = {2016},
month = {9},
abstract = {An evolutionarily stable strategy (ESS) is an equilibrium strategy that is immune to invasions by rare alternative (mutant) strategies. Unlike Nash equilibria, ESS do not always exist in finite games. In this paper we address the question of what happens when the size of the game increases: does an ESS exist for almost every large game? We let the entries of an $n \times n$ game matrix be independently randomly chosen according to a symmetrical subexponential distribution F, and study the expected number of ESS with support of size d as $n \to \infty$. In a previous paper by Hart, Rinott and Weiss [6] it was shown that this limit is 1 2 for d = 2. This paper deals with the case of $d \geq 4$, and proves the conjecture in [6] (Section 6,c), that the expected number of ESS with support of size $d \geq 4$ is 0. Furthermore, it discusses the classic problem of the number of facets of a convex hull of n random points in $R^d$, and relates it to the above ESS problem. Given a collection of i.i.d. random points, our result implies that the expected number of facets of their convex hull converges to 2d as $n \to \infty$.},
url = {/files/dp702.pdf},
author = {Ohad Navon}
}
@booklet {sophiebade-gssaos2016,
title = {{Gibbard-Satterthwaite} Success Stories and Obvious Strategyproofness},
journal = {Discussion Papers},
number = {704},
year = {2016},
month = {10},
abstract = {The Gibbard-Satterthwaite Impossibility Theorem (Gibbard, 1973; Satterthwaite, 1975) holds that dictatorship is the only unanimous and strategyproof social choice function on the full domain of preferences. Much of the work in mechanism design aims at getting around this impossibility theorem. Three grand success stories stand out. On the domains of single peaked preferences, house matching, and of quasilinear preferences, there are appealing unanimous and strategyproof social choice functions. We investigate whether these success stories are robust to strengthening strategyproofness to obvious strategyproofness, recently introduced by Li (2015). A social choice function is obviously strategyproof implementable (OSP) implementable if even cognitively limited agents can recognize their strategies as weakly dominant. For single-peaked preferences, we characterize the class of OSP-implementable and unanimous social choice rules as dictatorships with safeguards against extremism mechanisms (which turn out to also be Pareto optimal) in which the dictator can choose the outcome, but other agents may prevent the dictator from choosing an outcome which is too extreme. Median voting is consequently not OSP-implementable. Indeed the only OSP-implementable quantile rules either choose the minimal or the maximal ideal point. For house matching, we characterize the class of OSP-implementable and Pareto optimal matching rules as sequential barter with lurkers a significant generalization over bossy variants of bipolar serially dictatorial rules. While Li (2015) shows that second-price auctions are OSP-implementable when only one good is sold, we show that this positive result does not extend to the case of multiple goods. Even when all agents preferences over goods are quasilinear and additive, no welfare-maximizing auction where losers pay nothing is OSP-implementable when more than one good is sold. 
Our analysis makes use of a gradual revelation principle, an analog of the (direct) revelation principle for OSP mechanisms that we present and prove.},
url = {/files/dp704.pdf},
author = {Sophie Bade and Yannai A. Gonczarowski}
}
@booklet {hananshteingart-hsoseirsgbniol2016,
title = {Heterogeneous Suppression of Sequential Effects in Random Sequence Generation, but Not in Operant Learning},
journal = {Discussion Papers},
number = {701},
year = {2016},
month = {9},
abstract = {There is a long history of experiments, in which participants are instructed to generate a long sequence of binary random numbers. The scope of this line of research has shifted over the years from identifying the basic psychological principles and/or the heuristics that lead to deviations from randomness, to one of predicting future choices. In this paper, we used generalized linear regression and the framework of Reinforcement Learning in order to address both points. In particular, we used logistic regression analysis in order to characterize the temporal sequence of participants{\textquoteright} choices. Surprisingly, a population analysis indicated that the contribution of the most recent trial has only a weak effect on behavior, compared to more preceding trials, a result that seems irreconcilable with standard sequential effects that decay monotonously with the delay. However, when considering each participant separately, we found that the magnitudes of the sequential effect are a monotonous decreasing function of the delay, yet these individual sequential effects are largely averaged out in a population analysis because of heterogeneity. The substantial behavioral heterogeneity in this task is further demonstrated quantitatively by considering the predictive power of the model. We show that a heterogeneous model of sequential dependencies captures the structure available in random sequence generation. Finally, we show that the results of the logistic regression analysis can be interpreted in the framework of reinforcement learning, allowing us to compare the sequential effects in the random sequence generation task to those in an operant learning task. We show that in contrast to the random sequence generation task, sequential effects in operant learning are far more homogenous across the population. 
These results suggest that in the random sequence generation task, different participants adopt different cognitive strategies to suppress sequential dependencies when generating the {\textquotedblleft}random{\textquotedblright} sequences.},
url = {/files/dp701.pdf},
author = {Hanan Shteingart and Yonatan Loewenstein}
}
@booklet {keren-tlol2016,
title = {Logic of Love, The},
journal = {Discussion Papers},
number = {695},
year = {2016},
month = {3},
abstract = {This philosophical work lays the groundwork for a game-theoretic account of (romantic) love, substantiating the folk-psychological conception of love as {\textquoteright}a unification of souls{\textquoteright}. It does so by setting up an appropriate universal framework of cognitive agency, that accommodates such unifications and motivates them. This framework applies the gene{\textquoteright}s eye view of evolution to the evolution of cognition, integrating it with a distributed, dynamic theory of selfhood and the game-theoretic principles of agent-unification that govern these dynamics. The application of this framework to particular biological settings produces love as a theoretical evolutionary prediction (unveiling its rationality). Through this, the connection of the strategic normativity to love{\textquoteright}s real-life behavioral and phenomenological expressions is systematically explored.},
url = {/files/dp695.pdf},
author = {Aviv Keren}
}
@booklet {bar-hillel-rtrst2016,
title = {Reply to Rodway, Schepman \& Thoma (2016)},
journal = {Discussion Papers},
number = {699},
year = {2016},
month = {4},
url = {/files/dp699.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {bezalelpeleg-sajldorr2016,
title = {Sequential Aggregation Judgments: Logical Derivation of Relevance Relation},
journal = {Discussion Papers},
number = {703},
year = {2016},
month = {9},
abstract = {Following Dietrich (2014) we consider using choice by plurality voting (CPV) as a judgment aggregation correspondence. We notice that a result of Roberts (1991) implies that CPV is axiomatically characterized by anonymity, neutrality, unanimity, and (Young s) reinforcement. Following List (2004) and Dietrich (2015) we construct a sequential voting procedure of judgement aggregation which satisfies rationality, anonymity, unanimity, and independence of irrelevant propositions (with respect to a relevance correspondence that does not satisfy transitivity). We offer a tentative characterization for this aggregation procedure},
url = {/files/dp703.pdf},
author = {Bezalel Peleg and Shmuel Zamir}
}
@booklet {davidheyd-ttqfarc2016,
title = {Thorny Quest for a Rational Constitution, The},
journal = {Discussion Papers},
number = {696},
year = {2016},
month = {3},
url = {/files/dp696.pdf},
author = {David Heyd and Uriel Procaccia and Uzi Segal}
}
@booklet {bezalelpeleg-obsocng2015,
title = {On Bargaining Sets of Convex NTU Games},
journal = {Discussion Papers},
number = {681},
year = {2015},
month = {4},
publisher = {IGTR{\textquoteright}s Volume 17, Issue 4, Pages 1-7.},
abstract = {We show that the Aumann-Davis-Maschler bargaining set and the Mas-Colell bargaining set of a non-leveled NTU game that is either ordinal convex or coalition merge convex coincides with the core of the game. Moreover, we show by means of an example that the foregoing statement may not be valid if the NTU game is marginal convex.},
url = {/files/dp681.pdf},
author = {Bezalel Peleg and Peter Sudh{\"o}lter}
}
@booklet {yosefrinott-coahhpbmas2015,
title = {Comments on a Hot Hand Paper by Miller and Sanjurjo (2015)},
journal = {Discussion Papers},
number = {688},
year = {2015},
month = {8},
abstract = {Miller and Sanjurjo (2015) suggest that many analyses of the hot hand and the gambler{\textquoteright}s fallacies are subject to a bias. The purpose of this note is to describe our understanding of their main point in terms we hope are simpler and more accessible to non-mathematicians than is the original.},
url = {/files/dp688.pdf},
author = {Yosef Rinott and Maya Bar-Hillel}
}
@booklet {sagijaffe-dax-acmoimcdpd2015,
title = {A Computational Model of Implicit Memory Captures Dyslexics{\textquoteright} Perceptual Deficits},
journal = {Discussion Papers},
number = {690},
year = {2015},
month = {9},
abstract = {Dyslexics are diagnosed for their poor reading skills. Yet they characteristically also suffer from poor verbal memory, and often from poor auditory skills. To date, this combined profile has been accounted for in broad cognitive terms. Here, we hypothesize that the perceptual deficits associated with dyslexia can be understood computationally as a deficit in integrating prior information with noisy observations. To test this hypothesis we analyzed the performance of human participants in an auditory discrimination task using a two-parameter computational model. One parameter captures the internal noise in representing the current event, and the other captures the impact of recently acquired prior information. Our findings show that dyslexics perceptual deficit can be accounted for by inadequate adjustment of these components; namely, low weighting of their implicit memory of past trials relative to their internal noise. Underweighting the stimulus statistics decreased dyslexics ability to compensate for noisy observations. ERP measurements (P2 component) while participants watched a silent movie, indicated that dyslexics perceptual deficiency may stem from poor automatic integration of stimulus statistics. Taken together, this study provides the first description of a specific computational deficit associated with dyslexia.},
url = {/files/dp690.pdf},
author = {Sagi Jaffe-Dax and Ofri Raviv and Nori Jacoby and Yonatan Loewenstein and Merav Ahissar}
}
@booklet{aumann-acffttora2015,
  title     = {A Conceptual Foundation for the Theory of Risk Aversion},
  journal   = {Discussion Papers},
  number    = {686},
  year      = {2015},
  month     = jun,
  abstract  = {Classically, risk aversion is equated with concavity of the utility function. In this work we explore the conceptual foundations of this definition. In accordance with neo-classical economics, we seek an ordinal definition, based on the decision maker{\textquoteright}s preference order, independent of numerical values. We present two such definitions, based on simple, conceptually appealing interpretations of the notion of risk-aversion. We then show that when cast in quantitative form these ordinal definitions coincide with the classical Arrow-Pratt definition (once the latter is defined with respect to the appropriate units), thus providing a conceptual foundation for the classical definition. The implications of the theory are discussed, including, in particular, to the understanding of insurance. The entire study is within the expected utility framework.},
  url       = {/files/dp686.pdf},
  author    = {Aumann, Yonatan}
}
@booklet{elonkohlberg-tcsosg2015,
  title     = {The Cooperative Solution of Stochastic Games},
  journal   = {Discussion Papers},
  number    = {679},
  year      = {2015},
  month     = feb,
  abstract  = {Building on the work of Nash, Harsanyi, and Shapley, we define a cooperative solution for strategic games that takes account of both the competitive and the cooperative aspects of such games. We prove existence in the general (NTU) case and uniqueness in the TU case. Our main result is an extension of the definition and the existence and uniqueness theorems to stochastic games - discounted or undiscounted.},
  url       = {/files/dp679.pdf},
  author    = {Kohlberg, Elon and Neyman, Abraham}
}
@booklet{hananshteingart-teossacsopeb2015,
  title     = {The Effect of Sample Size and Cognitive Strategy on Probability Estimation Bias},
  journal   = {Discussion Papers},
  number    = {680},
  year      = {2015},
  month     = feb,
  abstract  = {Probability estimation is an essential cognitive function in perception, motor control, and decision making. Many studies have shown that when making decisions in a stochastic operant conditioning task, people and animals behave as if they underestimate the probability of rare events. It is commonly assumed that this behavior is a natural consequence of estimating a probability from a small sample, also known as sampling bias. The objective of this paper is to challenge this common lore. We show that in fact, probabilities estimated from a small sample can lead to behaviors that will be interpreted as underestimating or as overestimating the probability of rare events, depending on the cognitive strategy used. Moreover, this sampling bias hypothesis makes an implausible prediction that minute differences in the values of the sample size or the underlying probability will determine whether rare events will be underweighted or overweighed. We discuss the implications of this sensitivity for the design and interpretation of experiments. Finally, we propose an alternative sequential learning model with a resetting of initial conditions for probability estimation and show that this model predicts the experimentally-observed robust underweighting of rare events.},
  url       = {/files/dp680.pdf},
  author    = {Shteingart, Hanan and Loewenstein, Yonatan}
}
@booklet{sergiuhart-egtac2015,
  title     = {Evidence Games: Truth and Commitment},
  journal   = {Discussion Papers},
  number    = {684},
  year      = {2015},
  month     = may,
  abstract  = {An evidence game is a strategic disclosure game in which an agent who has different pieces of verifiable evidence decides which ones to disclose and which ones to conceal, and a principal chooses an action (a "reward"). The agent{\textquoteright}s preference is the same regardless of his information (his "type")---he always prefers the reward to be as high as possible---whereas the principal prefers the reward to fit the agent{\textquoteright}s type. We compare the setup where the principal chooses the action only after seeing the disclosed evidence, to the setup where the principal can commit ahead of time to a reward policy (the latter is the standard mechanism-design setup). The main result is that under natural conditions on the truth structure of the evidence, the two setups yield the same equilibrium outcome.},
  url       = {http://www.ma.huji.ac.il/hart/abs/st-ne.html},
  author    = {Hart, Sergiu and Kremer, Ilan and Perry, Motty}
}
@booklet{bezalelpeleg-fepiscaac2015,
  title     = {Feasible Elimination Procedures in Social Choice: An Axiomatic Characterization},
  journal   = {Discussion Papers},
  number    = {693},
  year      = {2015},
  month     = dec,
  abstract  = {Feasible elimination procedures (Peleg, 1978) play a central role in constructing social choice functions which have the following property: in the associated game form, for any preference profile there exists a strong Nash equilibrium resulting in the sincere outcome. In this paper we provide an axiomatic characterization of the social choice correspondence resulting from applying feasible elimination procedures. The axioms are anonymity, Maskin monotonicity, and independent blocking.},
  url       = {/files/dp693.pdf},
  author    = {Peleg, Bezalel and Peters, Hans}
}
@booklet{zik-iimoipamv2015,
  title     = {Implementation in Models of Independent, Private, and Multivariate Values},
  journal   = {Discussion Papers},
  number    = {689},
  year      = {2015},
  month     = sep,
  abstract  = {We consider the problem of implementation in models of independent private values in which the valuation an agent attributes to a particular alternative is a function from a multidimensional Euclidean space to the real line. We first consider implementation by standard mechanisms, that include a decision rule and a profile of personal transfers. We present impossibility results on the implementation of decision rules that assign different outcomes to profiles of signals that result in the same profile of valuations. We then consider implementation by extended mechanisms that include, in addition to a decision rule and a profile of personal transfers, a profile of functions that affect the arguments of the valuation functions. We show that decision rules that assign different outcomes to profiles of signals that result in the same profile of valuations can be implemented by such mechanisms.},
  url       = {/files/dp689.pdf},
  author    = {Zik, Boaz}
}
@booklet{bar-hillel-peicfsdacs2015,
  title     = {Position Effects in Choice from Simultaneous Displays: A Conundrum Solved},
  journal   = {Discussion Papers},
  number    = {678},
  year      = {2015},
  month     = jan,
  publisher = {Perspectives on Psychological Science 2015, Vol. 10(4), pp. 419--433.},
  abstract  = {From drop-down computer menus to department-store aisles, people in everyday life often choose from simultaneous displays of products or options. Studies of position effects in such choices show seemingly inconsistent results. For example, in restaurant choice, items enjoy an advantage when placed at the beginning or end of the menu listings, but in multiple-choice tests, answers are more popular when placed in the middle of the offered list. When reaching for a bottle on a supermarket shelf, bottles in the middle of the display are more popular. But on voting ballots, first is the most advantageous position. Some of the effects are quite sensible, while others are harder to justify and can aptly be regarded as biases. This paper attempts to put position effects into a unified and coherent framework, and to account for them simply, using a small number of familiar psychological principles.},
  url       = {/files/dp678JournalVer.pdf},
  author    = {Bar-Hillel, Maya}
}
@booklet{moshehaviv-raomq2015,
  title     = {Regulating an Observable {M/M/1} Queue},
  journal   = {Discussion Papers},
  number    = {691},
  year      = {2015},
  month     = sep,
  abstract  = {Naor (1969) was the first to observe that in a single-server memoryless queue, customers who inspect the queue length upon arrival and accordingly decide whether to join or not may join even if from the social point of view they are worse off. The question then is how to mechanically design the system such that customers will join only queue lengths that are advised by society, while still minding their own selfish utility. After reviewing some existing mechanisms (some involving money transfers and some not), we suggest novel ones that do not involve money transfers. They possess some advantages over the existing ones, which we itemize.},
  url       = {/files/dp691.pdf},
  author    = {Haviv, Moshe and Oz, Binyamin}
}
@booklet{siedner-romgaaa2015,
  title     = {Risk of Monetary Gambles: An Axiomatic Approach},
  journal   = {Discussion Papers},
  number    = {682},
  year      = {2015},
  month     = apr,
  abstract  = {In this work we present five axioms for a risk-order relation defined over (monetary) gambles. We then characterize an index that satisfies all these axioms---the probability of losing money in a gamble multiplied by the expected value of such an outcome---and prove its uniqueness. We propose to use this function as the risk of a gamble. This index is continuous, homogeneous, monotonic with respect to first- and second-order stochastic dominance, and simple to calculate. We also compare our index with some other risk indices mentioned in the literature.},
  url       = {/files/dp682.pdf},
  author    = {Siedner, Tomer}
}
@booklet{weiss-trwtbp2015,
  title     = {The Robber Wants To Be Punished},
  journal   = {Discussion Papers},
  number    = {685},
  year      = {2015},
  month     = may,
  abstract  = {It is a commonly held intuition that increasing punishment leads to less crime. Let{\textquoteright}s move our glance from the punishment for the crime itself to the punishment for the attempt to commit a crime, or to the punishment for the threat to carry it out. We{\textquoteright}ll argue that the greater the punishment for the attempt to rob, i.e. for the threat, "give me your money or else {\textbrokenbar}", the greater the number of robberies and attempts there will be. The punishment for the threat makes the withdrawal from it more expensive for the criminal, making the relative cost of committing the crime lower. In other words, the punishment of the attempt turns the attempt into a commitment by the robber, while at the same time turning an incredible threat into a credible one. Therefore, the robber has a strong interest in a legal system that increases the punishment of the attempt.},
  url       = {/files/dp685.pdf},
  author    = {Weiss, Uri}
}
@booklet{omeredhan-sapi2015,
  title     = {Sex And Portfolio Investment},
  journal   = {Discussion Papers},
  number    = {683},
  year      = {2015},
  month     = apr,
  abstract  = {We attempt to answer why sex is nearly ubiquitous when asexual reproduction is ostensibly more efficient than sexual reproduction. From the perspective of a genetic allele, each individual bearing that allele is akin to a stock share yielding dividends equal to that individual{\textquoteright}s number of offspring, and the totality of individuals bearing the allele is its portfolio investment. Alleles compete over portfolio growth, and evolutionary reproduction strategies are essentially on-line learning algorithms seeking improved portfolio growth, with sexual reproduction a goal-directed algorithmic exploration of genotype space by sampling in each generation. The model assumes a stochastically changing environment but not weak selection. We show that in finite population models the algorithm of sexual reproduction yields, with high probability, higher expected growth than the algorithm of asexual reproduction does, proposing this as an explanation to why a majority of species reproduce sexually.},
  url       = {/files/dp683.pdf},
  author    = {Edhan, Omer and Hellman, Ziv and Sherill-Rofe, Dana}
}
@booklet{deanpfoster-sclfafr2015,
  title     = {Smooth Calibration, Leaky Forecasts, and Finite Recall},
  journal   = {Discussion Papers},
  number    = {692},
  year      = {2015},
  month     = sep,
  abstract  = {We propose to smooth out the calibration score, which measures how good a forecaster is, by combining nearby forecasts. While regular calibration can be guaranteed only by randomized forecasting procedures, we show that smooth calibration can be guaranteed by deterministic procedures. As a consequence, it does not matter if the forecasts are leaked, i.e., made known in advance: smooth calibration can nevertheless be guaranteed (while regular calibration cannot). Moreover, our procedure has finite recall, is stationary, and all forecasts lie on a finite grid. We also consider related problems: online linear regression, weak calibration, and uncoupled Nash dynamics in n-person games.},
  url       = {http://www.ma.huji.ac.il/hart/abs/calib-eq.html},
  author    = {Foster, Dean P. and Hart, Sergiu}
}
@booklet{amir-uoosiclg2015,
  title     = {Uniqueness of Optimal Strategies in {Captain Lotto} Games},
  journal   = {Discussion Papers},
  number    = {687},
  year      = {2015},
  month     = jun,
  abstract  = {We consider the class of two-person zero-sum allocation games known as Captain Lotto games (Hart 2014). These are Colonel Blotto type games in which the players have capacity constraints. We show that the players{\textquoteright} optimal strategies are unique in most cases.},
  url       = {/files/dp687.pdf},
  author    = {Amir, Nadav}
}
@booklet{toddrkaplan-aia2014,
  title     = {Advances in Auctions},
  journal   = {Discussion Papers},
  number    = {662},
  year      = {2014},
  month     = mar,
  publisher = {This Paper Is a Chapter in the Forthcoming Handbook of Game Theory, Volume 4, Edited by Peyton Young and Shmuel Zamir, Elsevier (2014).},
  abstract  = {As a selling mechanism, auctions have acquired a central position in the free market economy all over the globe. This development has deepened, broadened, and expanded the theory of auctions in new directions. This chapter is intended as a selective update of some of the developments and applications of auction theory in the two decades since Wilson (1992) wrote the previous Handbook chapter on this topic.},
  url       = {/files/dp662.pdf},
  author    = {Kaplan, Todd R. and Zamir, Shmuel}
}
@booklet{hart-agwcfcltaa2014,
  title     = {Allocation Games with Caps: From {Captain Lotto} to All-Pay Auctions},
  journal   = {Discussion Papers},
  number    = {670},
  year      = {2014},
  month     = nov,
  abstract  = {A Lotto game is a two-person zero-sum game where each player chooses a distribution on nonnegative real numbers with given expectation, so as to maximize the probability that his realized choice is higher than his opponent{\textquoteright}s. These games arise in various competitive allocation setups (e.g., contests, research and development races, political campaigns, Colonel Blotto games). A Captain Lotto game is a Lotto game with caps, which are upper bounds on the numbers that may be chosen. First, we solve the Captain Lotto games. Second, we show how to reduce all-pay auctions to simpler games---expenditure games---using the solution of the corresponding Lotto games. As a particular application we solve all-pay auctions with unequal caps, which yield a significant increase in the seller{\textquoteright}s revenue (or, the players{\textquoteright} efforts).},
  url       = {http://www.ma.huji.ac.il/hart/abs/lotto.html},
  author    = {Hart, Sergiu}
}
@booklet{yannaiagonczarowski-ctehcoeirsg2014,
  title     = {Cascading to Equilibrium: Hydraulic Computation of Equilibria in Resource Selection Games},
  journal   = {Discussion Papers},
  number    = {673},
  year      = {2014},
  month     = dec,
  abstract  = {Drawing intuition from a (physical) hydraulic system, we present a novel framework, constructively showing the existence of a strong Nash equilibrium in resource selection games with nonatomic players, the coincidence of strong equilibria and Nash equilibria in such games, and the invariance of the cost of each given resource across all Nash equilibria. Our proofs allow for explicit calculation of Nash equilibrium and for explicit and direct calculation of the resulting (invariant) costs of resources, and do not hinge on any fixed-point theorem, on the Minimax theorem or any equivalent result, on the existence of a potential, or on linear programming. A generalization of resource selection games, called resource selection games with I.D.-dependent weighting, is defined, and the results are extended to this family, showing that while resource costs are no longer invariant across Nash equilibria in games of this family, they are nonetheless invariant across all strong Nash equilibria, drawing a novel fundamental connection between group deviation and I.D.-congestion. A natural application of the resulting machinery to a large class of constraint-satisfaction problems is also described.},
  url       = {/files/dp673.pdf},
  author    = {Gonczarowski, Yannai A. and Tennenholtz, Moshe}
}
@booklet{bezalelpeleg-ckfmfepr2014,
  title     = {Choosing K from M: Feasible Elimination Procedures Reconsidered},
  journal   = {Discussion Papers},
  number    = {671},
  year      = {2014},
  month     = dec,
  abstract  = {We show that feasible elimination procedures (Peleg, 1978) can be used to select k from m alternatives. An important advantage of this method is the core property: no coalition can guarantee an outcome that is preferred by all its members. We also provide an axiomatic characterization for the case k = 1, using the conditions of anonymity, Maskin monotonicity, and independent blocking. Finally, we show for any k that outcomes of feasible elimination procedures can be computed in polynomial time, by showing that the problem is computationally equivalent to finding a maximal matching in a bipartite graph.},
  url       = {/files/dp671.pdf},
  author    = {Peleg, Bezalel and Peters, Hans}
}
@booklet{ofriraviv-cbbrftiopsop2014,
  title     = {Contradictory Behavioral Biases Result from the Influence of Past Stimuli on Perception},
  journal   = {Discussion Papers},
  number    = {672},
  year      = {2014},
  month     = dec,
  abstract  = {Biases such as the preference of a particular response for no obvious reason, are an integral part of psychophysics. Such biases have been reported in the common two-alternative forced choice (2AFC) experiments, where participants are instructed to compare two consecutively presented stimuli. However, the principles underlying these biases are largely unknown and previous studies have typically used ad-hoc explanations to account for them. Here we consider human performance in the 2AFC tone frequency discrimination task, utilizing two standard protocols. In both protocols, each trial contains a reference stimulus. In one (Reference-Lower protocol), the frequency of the reference stimulus is always lower than that of the comparison stimulus whereas in the other (Reference protocol), the frequency of the reference stimulus is either lower or higher than that of the comparison stimulus. We find substantial interval biases. Namely, participants perform better when the reference is in a specific interval. Surprisingly, the biases in the two experiments are opposite: performance is better when the reference is in the first interval in the Reference protocol, but is better when the reference is second in the Reference-Lower protocol. This inconsistency refutes previous accounts of the interval bias, and is resolved when experiments statistics is considered. Viewing perception as incorporation of sensory input with prior knowledge accumulated during the experiment accounts for the seemingly contradictory biases both qualitatively and quantitatively. The success of this account implies that even simple discriminations reflect a combination of sensory limitations, memory limitations, and the ability to utilize stimuli statistics.},
  url       = {/files/dp672.pdf},
  author    = {Raviv, Ofri and Lieder, Itay and Loewenstein, Yonatan and Ahissar, Merav}
}
@booklet{itaiarieliy-dogwsepm2014,
  title     = {Determinacy of Games with Stochastic Eventual Perfect Monitoring},
  journal   = {Discussion Papers},
  number    = {658},
  year      = {2014},
  month     = jan,
  abstract  = {We consider an infinite two-player stochastic zero-sum game with a Borel winning set, in which the opponent{\textquoteright}s actions are monitored via stochastic private signals. We introduce two conditions of the signalling structure: Stochastic Eventual Perfect Monitoring (SEPM) and Weak Stochastic Eventual Perfect Monitoring (WSEPM). When signals are deterministic these two conditions coincide and by a recent result due to [Shmaya (2011)] entail determinacy of the game. We generalize [Shmaya (2011)]{\textquoteright}s result and show that in the stochastic learning environment SEPM implies determinacy while WSEPM does not.},
  url       = {/files/dp658.pdf},
  author    = {Arieli, Itai and Levy, Yehuda (John)}
}
@booklet{galinoti-aeeobbiaa2014,
  title     = {An Experimental Evaluation of Bidders{\textquoteright} Behavior in Ad Auctions},
  journal   = {Discussion Papers},
  number    = {676},
  year      = {2014},
  month     = dec,
  publisher = {WWW {\textquoteright}14 Proceedings of the 23rd International Conference on World Wide Web, Pages 619-630},
  abstract  = {We performed controlled experiments of human participants in a continuous sequence of ad auctions, similar to those used by Internet companies. The goal of the research was to understand users{\textquoteright} strategies in making bids. We studied the behavior under two auction types: (1) the Generalized Second-Price (GSP) auction and (2) the Vickrey{\textendash}Clarke{\textendash}Groves (VCG) payment rule, and manipulated also the participants{\textquoteright} knowledge conditions: (1) explicitly given valuations and (2) payoff information from which valuations could be deduced. We found several interesting behaviors, among them are: - No convergence to equilibrium was detected; moreover the frequency with which participants modified their bids increased with time. - We can detect explicit "better-response" behavior rather than just mixed bidding. - While bidders in GSP auctions do strategically shade their bids, they tend to bid higher than theoretically predicted by the standard VCG-like equilibrium of GSP. - Bidders who are not explicitly given their valuations but can only deduce them from their gains behave a little less "precisely" than those with such explicit knowledge, but mostly during an initial learning phase. - VCG and GSP yield approximately the same (high) social welfare, but GSP tends to give higher revenue.},
  url       = {/files/dp676.pdf},
  author    = {Noti, Gali and Nisan, Noam and Yaniv, Ilan}
}
@booklet{abbamkrieger-agsp2014,
  title     = {A Generalized Secretary Problem},
  journal   = {Discussion Papers},
  number    = {668},
  year      = {2014},
  month     = jul,
  abstract  = {A new Secretary Problem is considered, where for fixed k and m one wins if at some time i = m(j-1) + 1 up to jm one selects one of the j best items among the first jm items.},
  url       = {/files/dp668.pdf},
  author    = {Krieger, Abba M. and Samuel-Cahn, Ester}
}
@booklet{mayabar-hillel-ot-arbibc2014,
  title     = {{\textquotedblleft}Heads or Tails?{\textquotedblright} {\textendash} A Reachability Bias in Binary Choice},
  journal   = {Discussion Papers},
  number    = {657},
  year      = {2014},
  month     = jan,
  publisher = {Journal of Experimental Psychology: Learning, Memory, and Cognition, Apr 28 , 2014,},
  abstract  = {When asked to mentally simulate coin tosses, people generate sequences which differ systematically from those generated by fair coins. It has been rarely noted that this divergence is apparent already in the very first mental toss. Analysis of several existing data sets reveals that about 80\% of respondents start their sequence with Heads. We attributed this to the linguistic convention describing coin toss outcomes as "Heads or Tails", not vice versa. However, our subsequent experiments found the "first-toss" bias reversible under minor changes in the experimental setup, such as mentioning Tails before Heads in the instructions. We offer a comprehensive account in terms of a novel response bias, which we call reachability. It is more general than the first-toss bias, and reflects the relative ease of reaching one option compared to its alternative in any binary choice context. When faced with a choice between two options (e.g., Heads and Tails, when "tossing" mental coins), whichever of the two is presented first by the choice architecture (hence, is more reachable) will be favored. This bias has far-reaching implications extending well beyond the context of randomness cognition, and in particular to binary surveys (e.g., accept vs. reject) and tests (e.g., True-False). In binary choice, there is an advantage to what presents first. Keywords: acquiescence bias; order effects; randomness cognition; reachability; response bias},
  url       = {/files/dp657.pdf},
  author    = {Bar-Hillel, Maya and Peer, Eyal and Acquisti, Alessandro}
}
@booklet{sergiuhart-hgasmfsmg2014,
  title     = {How Good Are Simple Mechanisms for Selling Multiple Goods?},
  journal   = {Discussion Papers},
  number    = {666},
  year      = {2014},
  month     = may,
  abstract  = {Maximizing the revenue from selling two goods (or items) is a notoriously difficult problem, in stark contrast to the single-good case. We show that simple "one-dimensional" mechanisms, such as selling the two goods separately, guarantee at least 73\% of the optimal revenue when the valuations of the two goods are independent and identically distributed, and at least 50\% when they are independent. However, in the general case where the valuations may be correlated, simple mechanisms cannot guarantee any positive fraction of the optimal revenue. We also introduce a "measure of complexity" for mechanisms{\textendash}-the menu size{\textendash}-and show that it is naturally related to the fraction of the optimal revenue that can be guaranteed.},
  url       = {http://www.ma.huji.ac.il/hart/abs/m-simple.html},
  author    = {Hart, Sergiu and Nisan, Noam}
}
@booklet{einavhart-ieiuceoaoiieac2014,
  title     = {Investing Even in Uneven Contests: Effects of Asymmetry on Investment in Experimental All-Pay Contests},
  journal   = {Discussion Papers},
  number    = {660},
  year      = {2014},
  month     = feb,
  abstract  = {Many competitions require investment of nonrefundable resources, e.g., political campaigns, financial markets, sports or courting rituals. One contestant wins the prize for the invested amount, while all others forfeit their investments without receiving compensation. Frequently, contests are asymmetric, due to differing resources or prize valuations. This could lead weaker contestants to avoid investing, and stronger ones to lower their investment. Two experiments explored the effects of asymmetry between the contestants---arising from their endowments or prizes---on investments. Subjects played both symmetric and asymmetric contests, enabling direct within-subject comparisons. We observed an effect of asymmetry only when it concerned endowments: Subjects invested less when their endowments were asymmetric, whereas (a-)symmetry in the prizes did not influence investments. The changes between consecutive investments can be explained by reactions to the previous outcome (win or loss) in terms of regret over the previous investment being too much or too little.},
  url       = {/files/db660.pdf},
  author    = {Hart, Einav and Avrahami, Judith and Kareev, Yaakov and Todd, Peter M.}
}
@booklet{davidazriel-omacuosm2014,
  title     = {On Measuring and Comparing Usefulness of Statistical Models},
  journal   = {Discussion Papers},
  number    = {669},
  year      = {2014},
  month     = oct,
  abstract  = {Statistical models in econometrics, biology, and most other areas, are not expected to be correct, and often are not very accurate. The choice of a model for the analysis of data depends on the purpose of the analysis, the relation between the data and the model, and also on the sample or data size. Combining ideas from Erev, Roth, Slonim, and Barron (2007) and the well-known AIC criterion and cross-validation, we propose a variant of model selection approach as a function of the models and the data size, with quantification of the chosen model{\textquoteright}s relative value. Our research is motivated by data from experimental economics, and we also give a simple biological example.},
  url       = {/files/db669.pdf},
  author    = {Azriel, David and Rinott, Yosef}
}
@booklet{gianluigimongillo-tmorl2014,
  title     = {The Misbehavior of Reinforcement Learning},
  journal   = {Discussion Papers},
  number    = {661},
  year      = {2014},
  month     = mar,
  publisher = {Forthcoming in Proc. IEEE},
  abstract  = {Organisms modify their behavior in response to its consequences, a phenomenon referred to as operant learning. The computational principles and neural mechanisms underlying operant learning are a subject of extensive experimental and theoretical investigations. Theoretical approaches largely rely on concepts and algorithms from Reinforcement Learning. The dominant view is that organisms maintain a value function, that is a set of estimates of the cumulative future rewards associated with the different behavioral options. These values are then used to select actions. Learning in this framework results from the update of these values depending on experience of the consequences of past actions. An alternative view questions the applicability of such a computational scheme to many real-life situations. Instead, it posits that organisms exploit the intrinsic variability in their action selection mechanism(s) to modify their behavior, e.g., via stochastic gradient ascent, without the need of an explicit representation of values. In this review, we compare these two approaches in terms of their computational power and flexibility, their putative neural correlates and, finally, in terms of their ability to account for behavior as observed in repeated-choice experiments. We discuss the successes and failures of these alternative approaches in explaining the observed patterns of choice behavior. We conclude by identifying some of the important challenges to a comprehensive theory of operant learning.},
  url       = {/files/db661.pdf},
  author    = {Mongillo, Gianluigi and Shteingart, Hanan and Loewenstein, Yonatan}
}
@booklet{oneill-noricate2014,
  title     = {Networks of Rights in Conflict: A {Talmudic} Example},
  journal   = {Discussion Papers},
  number    = {677},
  year      = {2014},
  month     = dec,
  abstract  = {Many disputes involve conflicts of rights. A common view is that rights cannot really be in conflict so one of those being claimed must be a mistake. This idea leads to extreme outcomes that cut some parties out. Many studies have investigated how to choose a compromise among rights but they have focused on situations where the incompatibility comes from the degrees of the claims, as when, for example, a deceased person promised his heirs more than his total estate. I analyze a Talmudic problem where the difficulty is the pattern of the rights - each one trumps another in a cycle. The theory of non-transferable utility coalitional games suggests two solutions, one based on Shapley{\textquoteright}s and Maschler-Owen{\textquoteright}s values, which are equivalent for the problem, and the other on Harsanyi{\textquoteright}s and Kalai-Samet{\textquoteright}s, also equivalent. Each satisfies four out of five desirable properties, better than several other solutions. The NTU games are appropriate not just for power-based negotiation but for disputes over justice, fairness and rights. It is hoped that this analysis will form part of a general understanding of rights conflicts.},
  url       = {/files/db677.pdf},
  author    = {O{\textquoteright}Neill, Barry}
}
@booklet{yannaiagonczarowski-nmaatfod2014,
  title     = {Noncooperative Market Allocation and the Formation of Downtown},
  journal   = {Discussion Papers},
  number    = {663},
  year      = {2014},
  month     = mar,
  abstract  = {Can noncooperative behaviour of merchants lead to a market allocation that prima facie seems anticompetitive? We introduce a model in which service providers aim at optimizing the number of customers who use their services, while customers aim at choosing service providers with minimal customer load. Each service provider chooses between a variety of levels of service, and as long as it does not lose customers, aims at minimizing its level of service; the minimum level of service required to satisfy a customer varies across customers. We consider a two-stage competition, in the first stage of which the service providers select their levels of service, and in the second stage {\textendash}- customers choose between the service providers. (We show via a novel construction that for any choice of strategies for the service providers, a unique distribution of the customers{\textquoteright} mass between them emerges from all Nash equilibria among the customers, showing the incentives of service providers in the two-stage game to be well defined.) In the two-stage game, we show that the competition among the service providers possesses a unique Nash equilibrium, which is moreover super strong; we also show that all sequential better-response dynamics of service providers reach this equilibrium, with best-response dynamics doing so surprisingly fast. If service providers choose their levels of service according to this equilibrium, then the unique Nash equilibrium among customers in the second phase is essentially an allocation (i.e. split) of the market between the service providers, based on the customers{\textquoteright} minimum acceptable quality of service; moreover, each service provider{\textquoteright}s chosen level of service is the lowest acceptable by the entirety of the market share allocated to it. Our results show that this seemingly-cooperative allocation of the market arises as the unique and highly-robust outcome of noncooperative (i.e. free from any form of collusion), even myopic, service-provider behaviour. The results of this paper are applicable to a variety of scenarios, such as the competition among ISPs, and shed a surprising light on aspects of location theory, such as the formation and structure of a city{\textquoteright}s central business district.},
  url       = {/files/db663.pdf},
  author    = {Gonczarowski, Yannai A. and Tennenholtz, Moshe}
}
@booklet {giladbavly-occabr2014,
title = {Online Concealed Correlation and Bounded Rationality},
journal = {Discussion Papers},
number = {659},
year = {2014},
month = {2},
abstract = {Correlation of players{\textquoteright} actions may evolve in the common course of the play of a repeated game with perfect monitoring ("online correlation"). In this paper we study the concealment of such correlation from a boundedly rational player. We show that "strong" players, i.e., players whose strategic complexity is less stringently bounded, can orchestrate the online correlation of the actions of "weak" players, where this correlation is concealed from an opponent of "intermediate" strength. The feasibility of such "online" concealed correlation is reflected in the individually rational payoff of the opponent and in the equilibrium payoffs of the repeated game. This result enables the derivation of a folk theorem that characterizes the set of equilibrium payoffs in a class of repeated games with boundedly rational players and a mechanism designer who sends public signals. The result is illustrated in two models, each of which captures a different aspect of bounded rationality. In the first, players use bounded recall strategies. In the second, players use strategies that are implementable by finite automata.},
url = {/files/dp659.pdf},
author = {Gilad Bavly and Abraham Neyman}
}
@booklet {hananshteingart-rlahb2014,
title = {Reinforcement Learning and Human Behavior},
journal = {Discussion Papers},
number = {656},
year = {2014},
month = {1},
publisher = {Current Opinion in Neurobiology 2014, 25:93--98},
abstract = {The dominant computational approach to model operant learning and its underlying neural activity is model-free reinforcement learning (RL). However, there is accumulating behavioral and neuronal-related evidence that human (and animal) operant learning is far more multifaceted. Theoretical advances in RL, such as hierarchical and model-based RL extend the explanatory power of RL to account for some of these findings. Nevertheless, some other aspects of human behavior remain inexplicable even in the simplest tasks. Here we review developments and remaining challenges in relating RL models to human operant learning. In particular, we emphasize that learning a model of the world is an essential step prior or in parallel to learning the policy in RL and discuss alternative models that directly learn a policy without an explicit world model in terms of state-action pairs.},
url = {/files/dp656.pdf},
author = {Hanan Shteingart and Yonatan Loewenstein}
}
@booklet {iritnowik-otridfne2014,
title = {On the Risk in Deviating from Nash Equilibrium},
journal = {Discussion Papers},
number = {664},
year = {2014},
month = {4},
abstract = {The purpose of this work is to offer for any zero-sum game with a unique strictly mixed Nash equilibrium, a measure for the risk when deviating from the Nash equilibrium. We present two approaches regarding the nature of deviations; strategic and erroneous. Accordingly, we define two models. In each model we define risk measures for the row-player (PI) and the column player (PII), and prove that the risks of PI and PII coincide. This result holds for any norm we use for the size of deviations. We develop explicit expressions for the risk measures in the L1 and L2 norms, and compute it for several games. Although the results hold for all norms, we show that only the L1 norm is suitable in our context, as it is the only norm which is consistent in the sense that it gives the same size to potentially equivalent deviations. The risk measures defined here enables testing and evaluating predictions on the behavior of players. For example: Do players deviate more in a game with lower risks than in a game with higher risk?},
url = {/files/dp664.pdf},
author = {Irit Nowik and Shmuel Zamir}
}
@booklet {moshehaviv-soaqvrp2014,
title = {Self-Regulation of a Queue Via Random Priorities},
journal = {Discussion Papers},
number = {674},
year = {2014},
month = {12},
abstract = {We consider a memoryless unobservable single-server queue where customers are homogeneous with respect to their reward (due to service completion) and with respect to their cost per unit of time of waiting. Left to themselves, it is well known that in equilibrium they will join the queue at a rate that is higher than it is socially optimal. We show that if customers draw a random preemptive priority parameter prior to deciding whether or not to join, the resulting equilibrium joining rate coincides with the socially optimal one. We also introduce some variations of this regulation scheme and review a few existing schemes from the literature. We suggest a classification of all these schemes, based on a few key properties, and use it to compare our new schemes with the existing ones.},
url = {/files/dp674.pdf},
author = {Moshe Haviv and Binyamin Oz}
}
@booklet {talneiman-sgiollfpb2014,
title = {Spatial Generalization in Operant Learning: Lessons from Professional Basketball},
journal = {Discussion Papers},
number = {665},
year = {2014},
month = {4},
abstract = {In operant learning, behaviors are reinforced or inhibited in response to the consequences of similar actions taken in the past. However, because in natural environments the {\textquoteright}same{\textquoteright} situation never recurs, it is essential for the learner to decide what {\textquoteright}similar{\textquoteright} is so that he can generalize from experience in one state of the world to future actions in different states of the world. The computational principles underlying this generalization are poorly understood, in particular because natural environments are typically too complex to study quantitatively. In this paper we study the principles underlying generalization in operant learning of professional basketball players. In particular, we utilize detailed information about the spatial organization of shot locations to study how players adapt their attacking strategy in real time according to recent events in the game. To quantify this learning, we study how a make{\textquoteright}miss from one location in the court affects the probabilities of shooting from different locations. We show that generalization is not a spatially-local process, nor is governed by the difficulty of the shot. Rather, to a first approximation, players use a simplified binary representation of the court into 2pt and 3pt zones. This result indicates that rather than using low-level features, generalization is determined by high-level cognitive processes that incorporate the abstract rules of the game.},
url = {/files/dp665.pdf},
author = {Tal Neiman and Yonatan Loewenstein}
}
@booklet {yannaiagonczarowski-asmrc2014,
title = {A Stable Marriage Requires Communication},
journal = {Discussion Papers},
number = {667},
year = {2014},
month = {6},
abstract = {The Gale-Shapely algorithm for the Stable Marriage Problem is known to take Theta(n^2) steps to find a stable marriage in the worst case, but only Theta(n log n) steps in the average case (with n women and n men). In 1976, Knuth asked whether the worst-case running time can be improved in a model of computation that does not require sequential access to the whole input. A partial negative answer was given by Ng and Hirschberg, who showed that Theta(n^2) queries are required in a model that allows certain natural random-access queries to the participants{\textquoteright} preferences. Using a reduction to the communication complexity of the disjointness problem, we prove a significantly more general - albeit slightly weaker - result, showing that Omega(n^2) Boolean queries of any type are required. Our lower bound generalizes to (A) randomized algorithms, (B) even just verifying the stability of a proposed marriage, (C) even allowing arbitrary separate preprocessing of the women{\textquoteright}s preferences and of the men{\textquoteright}s preferences, and (D) several variants of the basic problem, such as whether a given pair is married in every/some stable marriage.},
url = {/files/dp667.pdf},
author = {Yannai A. Gonczarowski and Noam Nisan}
}
@booklet {moshehaviv-stoatafqmls2014,
title = {Strategic Timing of Arrivals to a Finite Queue Multi-Server Loss System},
journal = {Discussion Papers},
number = {675},
year = {2014},
month = {12},
abstract = {We provide game-theoretic analysis of the arrival process to a multi-server system with a limited queue buffer, which admits customers only during a finite time interval. A customer who arrives at a full system is blocked and does not receive service. Customers can choose their arrival times with the goal of minimizing their probability of being blocked. We characterize the unique symmetric Nash equilibrium arrival distribution and present a method for computing it. This distribution is comprised of an atom at time zero, an interval with no arrivals (a gap), and a continuous distribution until the closing time. We further present a fluid approximation for the equilibrium behaviour when the population is large, where the fluid solution also admits an atom at zero, no gap, and a uniform distribution throughout the arrival interval. In doing so, we provide an approximation model for the equilibrium behaviour that does not require a numerical solution for a set of differential equations, as is required in the discrete case. For the corresponding problem of social optimization we provide explicit analysis of some special cases and numerical analysis of the general model. An upper bound is established for the price of anarchy (PoA). The PoA is shown to be not monotone with respect to population size.},
url = {/files/dp675.pdf},
author = {Moshe Haviv and Liron Ravner}
}
@booklet {amielvasl-taronaic2013,
title = {Adaptive Role of Nectarial Appendages in Colchicum, The},
journal = {Discussion Papers},
number = {645},
year = {2013},
month = {9},
abstract = {A few species within the genus Colchicum of the Colchicaceae family, a small group of species native to the transitional belt of the Mediterranean and the Middle East deserts, are characterized by unique morphological traits: nectarial appendages that occur at the base of the perianth segments and consist of two lamellae with teeth. The morphology of the nectarial appendages was measured in three species and in a new population with similar traits to this group for the first time. Nectarial appendages and nectar standing crop are larger for the inner whorl of perianth segments in all species, although the perianth segments are themselves usually smaller. Intact flowers received more ant visits in outer than in inner whorl perianth nectaries. Removal of the nectarial appendages resulted in an opposite trend, implying that these organs prevent ant access to nectaries. Ant access to flowers reduced nectar standing crop, which could reduce the fitness of the species assuming that ants do not pollinate. The role of nectarial appendages as nectar-theft deterrents is reinforced in light of the group{\textquoteright}s harsh habitat and flowering season.},
url = {/files/dp645.pdf},
author = {Amiel Vasl and Avi Shmida}
}
@booklet {zivhellman-bgwacos2013,
title = {Bayesian Games With a Continuum of States},
journal = {Discussion Papers},
number = {641},
year = {2013},
month = {5},
abstract = {Negative results on the existence of Bayesian equilibria when state spaces have the cardinality of the continuum have been attained in recent years. This has led to the natural question: are there conditions that characterise when Bayesian games over continuum state spaces have measurable Bayesian equilibria? We answer this in the affirmative. Assuming that each type has finite or countable support, measurable Bayesian equilibria may fail to exist if and only if the underlying common knowledge $\sigma$-algebra is non-separable. Furthermore, anomalous examples with continuum state spaces have been presented in the literature in which common priors exist over entire state spaces but not over common knowledge components. There are also spaces over which players can have no disagreement, but when restricting attention to common knowledge components disagreements can exist. We show that when the common knowledge $\sigma$-algebra is separable all these anomalies disappear.},
url = {/files/dp641.pdf},
author = {Ziv Hellman and Yehuda (John) Levy}
}
@booklet {nehama-coolita2013,
title = {Complexity of Optimal Lobbying in Threshold Aggregation},
journal = {Discussion Papers},
number = {642},
year = {2013},
month = {7},
publisher = {Proceedings of the 4th International Conference, ADT 2015, pp. 379--395},
abstract = {Optimal Lobbying is the problem a lobbyist or a campaign manager faces in a full-information voting scenario of a multi-issue referendum when trying to influence the result. The Lobby is faced with a profile that specifies for each voter and each issue whether the voter approves or rejects the issue, and seeks to find the smallest set of voters it must influence to change their vote, for a desired outcome to be obtained. This computational problem also describes problems arising in other scenarios of aggregating complex opinions, such as principal-agents incentives scheme in a complex combinatorial problem, and bribery and manipulation in Truth-Functional Judgement Aggregation. We study the computational complexity of Optimal Lobbying when the issues are aggregated using an anonymous monotone function and the family of desired outcomes is an upward-closed family. We analyze this problem with regard to two parameters: the minimal number of supporters needed to pass an issue, and the size of the maximal minterm of the desired set. We show that for the extreme values of the parameters, the problem is tractable, and provide algorithms. On the other hand, we prove intractability of the problem for the non-extremal values, which are common values for the parameters.},
url = {/files/Published version},
author = {Ilan Nehama}
}
@booklet {peleg-cvsrcaac2013,
title = {Consistent Voting Systems Revisited: Computation and Axiomatic Characterization},
journal = {Discussion Papers},
number = {649},
year = {2013},
month = {10},
abstract = {We add two results to the theory of consistent voting. Let M be the set of all survivors of some feasible elimination procedure. We prove that i) M can be computed in polynomial time for each profile of preferences and ii) M is characterized by anonymity, non- imposition, Maskin monotonicity, and additive blocking.},
url = {/files/dp649.pdf},
author = {Bezalel Peleg}
}
@booklet {gonczarowski-tdotclosciarp2013,
title = {Distribution of the Combined Length of Spanned Cycles in a Random Permutation, The},
journal = {Discussion Papers},
number = {650},
year = {2013},
month = {11},
abstract = {For a random permutation $\pi$ on $\{1,2,\ldots,n\}$ for fixed n, and for $M \subseteq \{1,2,\ldots,n\}$, we analyse the distribution of the combined length $L=L(\pi,M)$ of all cycles of $\pi$ that contain at least one element of M. We give a simple, explicit formula for the probability of every possible value for L (backed by three proofs of distinct flavours), as well as closed-form formulae for its expectation and variance, showing that less than $1/(|M|+1)$ of the elements $1,\ldots,n$ are expected to be contained in cycles of $\pi$ that are disjoint from M, with low probability for a large deviation from this fraction. We furthermore give a simple explicit formula for all rising-factorial moments of L. These results are applicable to the study of manipulation in matching markets.},
url = {/files/dp650.pdf},
author = {Yannai A. Gonczarowski}
}
@booklet {xu-esifsguafbd2013,
title = {Evolutionary Stability in Finite Stopping Games Under a Fast Best-Reply Dynamics},
journal = {Discussion Papers},
number = {632},
year = {2013},
month = {1},
abstract = {We consider a fast evolutionary dynamic process on finite stopping games, where each player at each node has at most one move to continue the game. A state is evolutionarily stable if its long-run relative frequency of occurrence is bounded away from zero as the mutation rate decreases to zero. The fast dynamic process allows each individual in each population to change its strategy at every stage. We define a robustness index of backward induction and show examples where the backward induction equilibrium component is not evolutionarily stable for large populations. We show some sufficient conditions for evolutionary stability, which are different from the ones for the conventional evolutionary model. Even for this fast dynamic process, the transition between any two Nash equilibrium components may take very long time.},
url = {/files/dp632.pdf},
author = {Zibo Xu}
}
@booklet {xu-esigegopi2013,
title = {Evolutionary Stability in General Extensive-Form Games of Perfect Information},
journal = {Discussion Papers},
number = {631},
year = {2013},
month = {1},
abstract = {We consider a basic dynamic evolutionary model with rare mutation and a best-reply (or better-reply) selection mechanism. A state is evolutionarily stable if its long-term relative frequency of occurrence is bounded away from zero as the mutation rate decreases to zero. We prove that, for all finite extensive-form games of perfect information, only Nash equilibria are evolutionarily stable. We show that, in games where a player may play at more than one node along some path, even when the populations increase to infinity, there may be some evolutionarily stable states which are not part of the backward induction equilibrium component. We give a sufficient condition for evolutionary stability and show how much extra value is needed in the terminal payoffs to make an equilibrium evolutionarily stable.},
url = {/files/dp631.pdf},
author = {Zibo Xu}
}
@booklet {edyglozman-fntasonraa2013,
title = {False Negotiations: The Art \& Science of Not Reaching an Agreement},
journal = {Discussion Papers},
number = {646},
year = {2013},
month = {9},
publisher = {Forthcoming in Journal of Conflict Resolution},
abstract = {The usual purpose of negotiations is to explore options and reach an agreement, if possible. We investigated a notable exception to this generalization, where a party negotiates without any intention of reaching an agreement. False negotiation occurs when a party gains more by stalling the negotiations until an external change takes place that improves its position considerably. While false negotiators aim to avoid agreement within the current frame of the negotiations, they also aim to keep the negotiation process alive, since walking away from the negotiation table could endanger their position. We report the results of a study that compared the actions of false and sincere negotiators. The false negotiators used competitive tactics that encumbered the negotiations, yet they concealed their intentions by maintaining a fa{\c c}ade of cooperation. Our theoretical discussion is focused on the balancing act involved in false negotiations and the challenges it poses for actors in social, managerial, and political settings. We conclude our analysis with an example from the realm of international negotiations.},
url = {/files/dp646.pdf},
author = {Edy Glozman and Netta Barak-Corren and Ilan Yaniv}
}
@booklet {rodrguez-barraquer-fsoetsoiubgosc2013,
title = {From Sets of Equilibria to Structures of Interaction Underlying Binary Games of Strategic Complements},
journal = {Discussion Papers},
number = {655},
year = {2013},
month = {12},
abstract = {Consider a setting in which agents can each take one of two ordered actions and in which the incentive of any given agent to take the high action is positively reinforced by the number of other agents that take it. Furthermore, assume that we don{\textquoteright}t know any other details about the game being played. What can we say about the details of the structure of the interaction between actions and incentives when we observe a set or a subset of all possible equilibria? In this paper we study 3 nested classes of games: (a) binary games of strategic complements; (b) games in (a) that admit a network representation: and (c) games in (b) in which the network is complete. Our main results are the following: It has long been established in the literature that the set of pure strategy Nash equilibria of any binary game of strategic complements among a set N of agents can be seen as a lattice on the set of all subsets of N under the partial order defined by the set inclusion relation. If the game happens to be strict in the sense that agents are never indifferent among outcomes (games in (a)), then the resulting lattice of equilibria satisfies a straightforward sparseness condition. (1) We show that, in fact, the games in (a) express all such lattices. (2) We characterize the collection of subsets of N that can be weakly expressed as the set of equilibria of some game of thresholds (games in (b)). (3) We characterize the collection of subsets of N that can be weakly expressed as the set of equilibria of some game of thresholds on the complete graph (games in (c)).},
url = {/files/dp655.pdf},
author = {Rodr{\'\i}guez-Barraquer, Tom{\'a}s}
}
@booklet {armandocastaeda-gbbupfcasc2013,
title = {Good, Better, Best! Unbeatable Protocols for Consensus and Set Consensus},
journal = {Discussion Papers},
number = {653},
year = {2013},
month = {11},
abstract = {While the very first consensus protocols for the synchronous model were designed to match the worst-case lower bound, deciding in exactly t+1 rounds in all runs, it was soon realized that they could be strictly improved upon by early stopping protocols. These dominate the first ones, by always deciding in at most t+1 rounds, but often much faster. A protocol is unbeatable if it can{\textquoteright}t be strictly dominated. Namely, if no protocol Q can decide strictly earlier than P against at least one adversary strategy, while deciding at least as fast as P in all cases. Unbeatability is often a much more suitable notion of optimality for distributed protocols than worst-case performance. Halpern, Moses and Waarts (2001), who introduced this notion, presented a general logic-based transformation of any consensus protocol to an unbeatable protocol that dominates it, and suggested a particular unbeatable consensus protocol. Their analysis is based on a notion of continual common knowledge, which is not easy to work with in practice. Using a more direct knowledge-based analysis, this paper studies unbeatability for both consensus and k-set consensus. We present unbeatable solutions to non-uniform consensus and k-set consensus, and uniform consensus in synchronous message-passing contexts with crash failures. Our consensus protocol strictly dominates the one suggested by Halpern, Moses and Waarts, showing that their protocol is in fact beatable.The k-set consensus problem is much more technically challenging than consensus, and its analysis has triggered the development of the topological approach to distributed computing. Worst-case lower bounds for this problem have required either techniques based on algebraic topology (Guerraoui et al., 2009), or reduction-based proofs (Alistarh et al., 2012; Gafni et al., 2011). Our proof of unbeatability is purely combinatorial, and is a direct, albeit nontrivial, generalization of the one for consensus. 
We also present an alternative topological unbeatability proof that allows to understand the connection between the connectivity of protocol complexes and the decision time of processes. All of our protocols make use of a notion of a hidden path of nodes relative to a process i at time m, in which a value unknown to i at m may be seen by others. This is a structure that can implicitly be found in lower bound proofs for consensus going back to the {\textquoteright}80s (Dolev and Strong, 1982). Its use in our protocols sheds light on the mathematical structure underlying the consensus problem and its variants.For the synchronous model, only solutions to the uniform variant of k-set consensus have been offered. Based on our unbeatable protocols for uniform consensus and for non-uniform k-set consensus, we present a uniform k-set consensus protocol that strictly dominates all known solutions to this problem in the synchronous model.},
url = {/files/dp653.pdf},
author = {Armando Casta{\~n}eda and Yannai A. Gonczarowski and Yoram Moses}
}
@booklet {xu-tiobiied2013,
title = {Instability of Backward Induction in Evolutionary Dynamics, The},
journal = {Discussion Papers},
number = {633},
year = {2013},
month = {1},
abstract = {This paper continues the work initiated in [19]. We adopt the same model as in [19]. We show that the non-backward-induction equilibrium component may be evolutionarily stable for any population size in a finite stopping game where the two equilibrium components are terminated by different players. A surprising result is that the backward induction equilibrium component may not be evolutionarily stable for large populations. Finally, we study the evolutionary stability result in a different limiting process where the expected number of mutations per generation is bounded away from both zero and infinity.},
url = {/files/db633.pdf},
author = {Zibo Xu}
}
@booklet {itaiarieli-tlobi2013,
title = {Logic of Backward Induction, The},
journal = {Discussion Papers},
number = {652},
year = {2013},
month = {11},
abstract = {The logic of backward induction (BI) in perfect information (PI) games has been intensely scrutinized for the past quarter century. A major development came in 2002, when P. Battigalli and M. Sinischalchi (BS) showed that an outcome of a PI game is consistent with common strong belief of utility maximization if and only if it is the BI outcome. Both BS{\textquoteright}s formulation, and their proof, are complex and deep. We show that the result continues to hold when utility maximization is replaced by a rationality condition that is even more compelling; more important, the formulation and proof become far more transparent, accessible, and self-contained.},
url = {/files/db652.pdf},
author = {Itai Arieli and Robert J. Aumann}
}
@booklet {gonczarowski-mosmumb2013,
title = {Manipulation of Stable Matchings Using Minimal Blacklists},
journal = {Discussion Papers},
number = {643},
year = {2013},
month = {7},
publisher = {Forthcoming in Proceedings of the 15th ACM Conference on Economics and Computation (EC 2014)},
abstract = {Gale and Sotomayor (1985) have shown that in the Gale-Shapley matching algorithm (1962), the proposed-to side W (referred to as women there) can strategically force the W-optimal stable matching as the M-optimal one by truncating their preference lists, each woman possibly blacklisting all but one man. As Gusfield and Irving have already noted in 1989, no results are known regarding achieving this feat by means other than such preference-list truncation, i.e. by also permuting preference lists.We answer Gusfield and Irving{\textquoteright}s open question by providing tight upper bounds on the amount of blacklists and their combined size, that are required by the women to force a given matching as the M-optimal stable matching, or, more generally, as the unique stable matching. Our results show that the coalition of all women can strategically force any matching as the unique stable matching, using preference lists in which at most half of the women have nonempty blacklists, and in which the average blacklist size is less than 1. This allows the women to manipulate the market in a manner that is far more inconspicuous, in a sense, than previously realized. When there are less women than men, we show that in the absence of blacklists for men, the women can force any matching as the unique stable matching without blacklisting anyone, while when there are more women than men, each to-be-unmatched woman may have to blacklist as many as all men. Together, these results shed light on the question of how much, if at all, do given preferences for one side a priori impose limitations on the set of stable matchings under various conditions. All of the results in this paper are constructive, providing efficient algorithms for calculating the desired strategies.},
url = {/files/db643R.pdf},
author = {Yannai A. Gonczarowski}
}
@booklet {sergiuhart-mcar2013,
title = {Markets, Correlation, and Regret-Matching},
journal = {Discussion Papers},
number = {648},
year = {2013},
month = {9},
abstract = {Inspired by the existing work on correlated equilibria and regret-based dynamics in games, we carry out a first exploration of the links between the leading equilibrium concept for (exchange) economies, Walrasian equilibrium, and the dynamics, specifically regret-matching dynamics, of trading games that fit the economic structure and have the property that their pure Nash equilibria implement the Walrasian outcomes. Interestingly, in the case of quasilinear utilities (or "transferable utility"), all the concepts essentially coincide, and we get simple deterministic dynamics converging to Walrasian outcomes. Connections to sunspot equilibria are also studied.},
url = {http://ma.huji.ac.il/hart/abs/we-regr.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {sergiuhart-tmcoa2013,
title = {Menu-Size Complexity of Auctions, The},
journal = {Discussion Papers},
number = {637},
year = {2013},
month = {4},
abstract = {We consider the menu size of auctions as a measure of auction complexity and study how it affects revenue. Our setting has a single revenue-maximizing seller selling two or more heterogenous items to a single buyer whose private values for the items are drawn from a (possibly correlated) known distribution, and whose valuation is additive over the items. We show that the revenue may increase arbitrarily with menu size and that a bounded menu size can not ensure any positive fraction of the optimal revenue. The menu size turns out to "nail down" the revenue properties of deterministic auctions: their menu size may be at most exponential in the number of items and indeed their revenue may be larger than that achievable by the simplest types of auctions by a factor that is exponential in the number of items but no larger. Our model is related to a previously studied "unit-demand" model and our results also answer an open problem in that model.},
url = {http://www.ma.huji.ac.il/hart/abs/m-corr.html},
author = {Sergiu Hart and Noam Nisan}
}
@booklet {yaari-nppl2013,
title = {Newcomb{\textquoteright}s Problem: Paradox Lost},
journal = {Discussion Papers},
number = {635},
year = {2013},
month = {4},
abstract = {{\textquoteright}~An agent needs to decide which of two available actions, A or B, to take. The agent{\textquoteright}s payoffs are such that A dominates B, i.e., taking A yields a better payoff than taking B, in every contingency. On the other hand, the agent{\textquoteright}s expected payoffs, given the action taken, are in the reverse order, i.e., E(payoff | B) > E(payoff | A) , which can happen if the probabilities of the various contingencies are not independent of the action being taken. What should the agent do? This dilemma has come to be known as Newcomb{\textquoteright}s Paradox (Nozick, 1969). The present essay shows that the rule "keep away, as much as possible, from any dominated action" is perfectly consistent with actually taking the dominated action, when appropriate. No paradox.{\textquoteright}{\textquoteright}},
url = {/files/db635.pdf},
author = {Menahem Yaari}
}
@booklet {runemidjord-oolcoe2013,
title = {Over-Caution of Large Committees of Experts},
journal = {Discussion Papers},
number = {654},
year = {2013},
month = {12},
abstract = {In this paper, we demonstrate that payoffs linked to a committee member{\textquoteright}s individual vote may explain over-cautious behavior in committees. A committee of experts must decide whether to approve or reject a proposed innovation on behalf of society. In addition to a payoff linked to the adequateness of the committee{\textquoteright}s decision, each expert receives a disesteem payoff if he/she voted in favor of an ill-fated innovation. An example is FDA committees, where committee members can be exposed to a disesteem (negative) payoff if they vote to pass a drug that proves to be fatal for some users. We show that no matter how small the disesteem payoffs are, information aggregation fails in large committees: under any majority rule, the committee rejects the innovation almost surely. We then show that this inefficiency can be mitigated by pre-vote information pooling, but only if the decision is take under unanimity: in the presence of disesteem payoffs, committee members will only vote efficiently if they are all responsible for the final decision.},
url = {/files/dp654.pdf},
author = {Rune Midjord and Tom{\'a}s Rodr{\'\i}guez-Barraquer and Justin Valasek}
}
@booklet {tamarkeasar-pvsmpbhfbp2013,
title = {Plant-Derived Visual Signals May Protect Beetle Herbivores from Bird Predators},
journal = {Discussion Papers},
number = {640},
year = {2013},
month = {5},
abstract = {Insect herbivores often use chemical signals obtained from their food plants to deter enemies and/or attract sexual partners. Do plant-based visual signals act similarly, i.e., repel consumers{\textquoteright} enemies and appeal to potential mates? We explored this question using the pollen-feeding beetle Pygopleurus israelitus (Glaphyridae), a specialized pollinator of Anemone coronaria{\textquoteright}s chemically defended red-morph flowers. We presented dead beetles, which had fed either on anemones or on cat-food, to young domestic chicks on a red (anemone-like) or a green (leaf-like) background. We determined whether the beetles{\textquoteright} background color and diet affected the chicks{\textquoteright} feeding. Cuticle surface extracts from anemone-fed beetles, but not from cat-food-fed beetles, contained a secondary metabolite characteristic of anemones. Latencies to the first picking-up and consuming of beetles from green backgrounds were shorter than of beetles from red backgrounds. The picking-up order of beetles also indicated that prey from the green background was preferred. The chicks retained this preference when re-tested, three days later. Handling times of anemone-fed beetles were longer than of cat-food-fed beetles. A previous study showed that glaphyrids improve their mate-finding prospects by orienting to large red anemone flowers. Here, female beetles preferred cat-food-fed to anemone-fed males in mate-choice assays, thus anemone-derived chemicals did not increase mating success. Instead, the combined results indicate that A. coronaria{\textquoteright}s red flowers provide a visual signal that may both deter its herbivore{\textquoteright}s predators and attract its mates. To our knowledge, this is the first experimental evidence for a potential protective role of plant-derived visual signals for insect herbivores/pollinators. 
Keywords: Predation; secondary metabolite; tritrophic interactions; warning coloration; domestic chick; Glaphyridae; pollination.},
url = {/files/dp640.pdf},
author = {Tamar Keasar and Miriam Kishinevsky and Avi Shmida and Yoram Gerchman and Nicka Chinkov and Avi Koplovich and Gadi Katzir}
}
@booklet {sergiuhart-tqcoce2013,
title = {Query Complexity of Correlated Equilibria, The},
journal = {Discussion Papers},
number = {647},
year = {2013},
month = {9},
abstract = {We consider the complexity of finding a Correlated Equilibrium in an n-player game in a model that allows the algorithm to make queries for players{\textquoteright} utilities at pure strategy profiles. Many randomized regret-matching dynamics are known to yield an approximate correlated equilibrium quickly: in time that is polynomial in the number of players, n, the number of strategies of each player, m, and the approximation error, $1/\epsilon$. Here we show that both randomization and approximation are necessary: no efficient deterministic algorithm can reach even an approximate equilibrium and no efficient randomized algorithm can reach an exact equilibrium.},
url = {http://ma.huji.ac.il/hart/abs/corr-com.html},
author = {Sergiu Hart and Noam Nisan}
}
@booklet {bezalelpeleg-rocuii2013,
title = {Representation of Constitutions Under Incomplete Information},
journal = {Discussion Papers},
number = {634},
year = {2013},
month = {1},
publisher = {Economic Theory 57 (2014), Pages 279-302},
abstract = {We model constitutions by effectivity functions. We assume that the constitution is common knowledge among the members of the society. However, the preferences of the citizen are private information. We investigate whether there exist decision schemes (i. e., functions that map profiles of (dichotomous) preferences on the set of outcomes to lotteries on the set of social states), with the following properties: i) The distribution of power induced by the decision scheme is identical to the effectivity function under consideration; and ii) the (incomplete information) game associated with the decision scheme has a Bayesian Nash equilibrium in pure strategies. If the effectivity function is monotonic and superadditive, then we find a class of decision schemes with the foregoing properties. When applied to n-person games in strategic form, a decision scheme d is a mapping from profiles of (dichotomous) preferences on the set of pure strategy vectors to probability distributions over outcomes (or equivalently, over pure strategy vectors). We prove that for any feasible and individually rational payoff vector of a strategic game, there exists a decision scheme that yields that payoff vector as a (pure) Nash equilibrium payoff in the game induced by the strategic game and the decision scheme. This can be viewed as a kind of purification result.},
url = {/files/dp634.pdf},
author = {Bezalel Peleg and Shmuel Zamir}
}
@booklet {neyman-sgwsd2013,
title = {Stochastic Games with Short-Stage Duration},
journal = {Discussion Papers},
number = {636},
year = {2013},
month = {4},
abstract = {We introduce asymptotic analysis of stochastic games with short-stage duration. The play of stage $k$, $k\geq 0$, of a stochastic game $\Gamma_\delta$ with stage duration $\delta$ is interpreted as the play in time $k\delta\leq t<(k+1)\delta$. We consider the asymptotics as the stage duration $\delta$ goes to $0$, and study the asymptotic behavior of the value, optimal strategies, and equilibrium. The asymptotic analogs of the discounted, limiting-average, and uniform equilibrium payoffs are defined. Convergence implies the existence of an asymptotic discounted equilibrium payoff, strong convergence implies the existence of an asymptotic limiting-average equilibrium payoff, and exact convergence implies the existence of an asymptotic uniform equilibrium payoff.},
url = {/files/dp636.pdf},
author = {Abraham Neyman}
}
@booklet {aboodi-wgprumb2013,
title = {Why Good People Reevaluate Underived Moral Beliefs?},
journal = {Discussion Papers},
number = {651},
year = {2013},
month = {11},
abstract = {Are good people motivated to behave in accordance with the moral truth whatever it is? Michael Smith, who has named this motivation the de-dicto moral motivation, famously criticized it. According to Smith, good people are instead motivated directly by more concrete moral concerns, such as {\textquoteleft}the well-being of their fellows, people getting what they deserve, justice, equality, and the like{\textquoteright}. Here I argue for the non-Smithian view that good people have (also) a de-dicto moral motivation. The argument runs roughly as follows: given that good people tend to behave appropriately, and that in some situations it is appropriate to reevaluate one{\textquoteright}s underived moral beliefs, good people tend to seriously reevaluate underived moral beliefs sometimes. Theories of motivation have to account for this fact (a point overlooked by Smith and his respondents). What motivates a good person to pay attention to evidence that is contrary to her underived moral beliefs? What does she aim for in reevaluating those beliefs? I argue that the view that good people are motivated to act morally de-dicto is in a better position to explain the relevant facts about good people{\textquoteright}s reevaluation of underived moral beliefs.},
url = {/files/dp651.pdf},
author = {Aboodi, Ron}
}
@booklet {kareev-aocl2012,
title = {Advantages of Cognitive Limitations},
journal = {Discussion Papers},
number = {611},
year = {2012},
month = {6},
abstract = {Being a product of evolutionary pressures, it would not be surprising to find that what seems to be a limitation of the cognitive system is actually a fine-tuned compromise between a set of competing needs. This thesis is demonstrated using the case of the limited capacity of short-term memory, which is often regarded as the prime example of a cognitive limitation.},
url = {/files/dp611.pdf},
author = {Kareev, Yaakov}
}
@booklet {konigsberg-aa2012,
title = {Aesthetic Autonomy},
journal = {Discussion Papers},
number = {601},
year = {2012},
month = {3},
abstract = {The acquaintance principle (AP) and the view it expresses have recently been tied to a debate surrounding the possibility of aesthetic testimony, which, plainly put, deals with the question whether aesthetic knowledge can be acquired through testimony - typically aesthetic and non-aesthetic descriptions communicated from person to person. In this context a number of suggestions have been put forward opting for a restricted acceptance of AP. This paper is an attempt to restrict AP even more.},
url = {/files/dp601.pdf},
author = {Amir Konigsberg}
}
@booklet {fabriziogermano-akorace2012,
title = {Approximate Knowledge of Rationality and Correlated Equilibria},
journal = {Discussion Papers},
number = {610},
year = {2012},
month = {6},
abstract = {We extend Aumann{\textquoteright}s theorem (Aumann, 1987) in deriving correlated equilibria as a consequence of common priors and common knowledge of rationality by explicitly allowing for non-rational behavior. We replace the assumption of common knowledge of rationality with a substantially weaker notion, p-belief of rationality, where agents believe the other agents are rational with probabilities p or more. We show that behavior in this case constitutes a constrained correlated equilibrium of a doubled game satisfying certain p-belief constraints and characterize the topological structure of the resulting set of p-rational outcomes. We establish continuity in the parameters p and show that, for p sufficiently close to one, the p-rational outcomes are close to the correlated equilibria and, with high probability, supported on strategies that survive the iterated elimination of strictly dominated strategies. Finally, we extend Aumann and Dreze{\textquoteright}s theorem (Aumann and Dreze, 2008) on rational expectations of interim types to the broader p-rational belief systems, and also discuss the case of non-common priors.},
url = {/files/dp610.pdf},
author = {Fabrizio Germano and Peio Zuazo-Garin}
}
@booklet {sergiuhart-armwmi2012,
title = {Approximate Revenue Maximization with Multiple Items},
journal = {Discussion Papers},
number = {606},
year = {2012},
month = {4},
abstract = {Myerson{\textquoteright}s classic result provides a full description of how a seller can maximize revenue when selling a single item. We address the question of revenue maximization in the simplest possible multi-item setting: two items and a single buyer who has independently distributed values for the items, and an additive valuation. In general, the revenue achievable from selling two independent items may be strictly higher than the sum of the revenues obtainable by selling each of them separately. In fact, the structure of optimal (i.e., revenue-maximizing) mechanisms for two items even in this simple setting is not understood. In this paper we obtain approximate revenue optimization results using two simple auctions: that of selling the items separately, and that of selling them as a single bundle. Our main results (which are of a "direct sum" variety, and apply to any distributions) are as follows. Selling the items separately guarantees at least half the revenue of the optimal auction; for identically distributed items, this becomes at least 73\% of the optimal revenue. For the case of k > 2 items, we show that selling separately guarantees at least a c/log^2 k fraction of the optimal revenue; for identically distributed items, the bundling auction yields at least a c/log k fraction of the optimal revenue.},
url = {http://www.ma.huji.ac.il/hart/abs/m-approx.html},
author = {Sergiu Hart and Noam Nisan}
}
@booklet {aloewatcc2012,
title = {Automatic Learning of Evaluation, with Applications to Computer Chess},
journal = {Discussion Papers},
number = {613},
year = {2012},
month = {7},
abstract = {A new and fast learning method is described in the context of teaching a program to play chess. A theory of the meaning of a position evaluation is developed, and is then confronted with a large collection of games played by masters or other programs. The program learns by fitting its evaluation to better predict the results of the games. The method has been employed by a top-rated program for the past 10 years, and has earned several world championships and successful matches against the world{\textquoteright}s best grandmasters for the program. The effectiveness of the method is demonstrated by showing its successful prediction of known playing strength of the programs.},
url = {/files/dp613.pdf},
author = {Amir Ban}
}
@booklet {babichenko-bdilag2012,
title = {Best-Reply Dynamics in Large Anonymous Games},
journal = {Discussion Papers},
number = {600},
year = {2012},
month = {3},
abstract = {We consider small-influence anonymous games with a large number of players $n$ where every player has two actions. For this class of games we present a best-reply dynamic with the following two properties. First, the dynamic reaches Nash approximate equilibria fast (in at most $cn\log n$ steps for some constant $c>0$). Second, Nash approximate equilibria are played by the dynamic with a limit frequency of at least $1-e^{-c'n}$ for some constant $c'>0$.},
url = {/files/dp600.pdf},
author = {Yakov Babichenko}
}
@booklet {levy-acsogwnses2012,
title = {A Cantor Set of Games with No Shift-Homogeneous Equilibrium Selection},
journal = {Discussion Papers},
number = {607},
year = {2012},
month = {4},
abstract = {We construct a continuum of games on a countable set of players that does not possess a measurable equilibrium selection that satisfies a natural homogeneity property. The explicit nature of the construction yields counterexamples to the existence of equilibria in models with overlapping generations and in games with a continuum of players.},
url = {/files/dp607.pdf},
author = {Yehuda (John) Levy}
}
@booklet {edhan-cvomgac2012,
title = {Continuous Values of Market Games Are Conic},
journal = {Discussion Papers},
number = {623},
year = {2012},
month = {8},
abstract = {We prove that every continuous value on a space of vector measure market games $Q$, containing the space of nonatomic measures $NA$, has the \textit{conic} property, i.e., if a game $v\in Q$ coincides with a nonatomic measure $\nu$ on a conical diagonal neighborhood then $\varphi(v)=\nu$. We deduce that every continuous value on the linear space $\mathcal{M}$, spanned by all vector measure market games, is determined by its values on $\mathcal{LM}$ - the space of vector measure market games which are Lipschitz functions of the measures.},
url = {/files/dp623.pdf},
author = {Omer Edhan}
}
@booklet {neyman-csg2012,
title = {Continuous-Time Stochastic Games},
journal = {Discussion Papers},
number = {616},
year = {2012},
month = {8},
abstract = {Every continuous-time stochastic game with finitely many states and actions has a uniform and limiting-average equilibrium payoff.},
url = {/files/dp616.pdf},
author = {Abraham Neyman}
}
@booklet {levy-csgofd2012,
title = {Continuous-Time Stochastic Games of Fixed Duration},
journal = {Discussion Papers},
number = {617},
year = {2012},
month = {8},
abstract = {We study non-zero-sum continuous-time stochastic games, also known as continuous-time Markov games, of fixed duration. We concentrate on Markovian strategies. We show by way of example that equilibria need not exist in Markovian strategies, but they always exist in Markovian public-signal correlated strategies. To do so, we develop criteria for a strategy profile to be an equilibrium via differential inclusions, both directly and also by modeling continuous-time stochastic games as differential games and using the Hamilton-Jacobi-Bellman equations. We also give an interpretation of equilibria in mixed strategies in continuous-time, and show that approximate equilibria always exist.},
url = {/files/dp617.pdf},
author = {Yehuda (John) Levy}
}
@booklet {hellman-csacp2012,
title = {Countable Spaces and Common Priors},
journal = {Discussion Papers},
number = {604},
year = {2012},
month = {4},
publisher = {Forthcoming in IJGT},
abstract = {We show that the no betting characterisation of the existence of common priors over finite type spaces extends only partially to improper priors in the countably infinite state space context: the existence of a common prior implies the absence of a bounded agreeable bet, and the absence of a common improper prior implies the existence of a bounded agreeable bet. However, a type space that lacks a common prior but has a common improper prior may or may not have a bounded agreeable bet. The iterated expectations characterisation of the existence of common priors extends almost as is, as a sufficient and necessary condition, from finite spaces to countable spaces, but fails to serve as a characterisation of common improper priors. As a side-benefit of the proofs here, we also obtain a constructive proof of the no betting characterisation in finite spaces.},
url = {/files/dp604.pdf},
author = {Ziv Hellman}
}
@booklet {michamandel-csbdacl2012,
title = {Cross-Sectional Sampling, Bias, Dependence, and Composite Likelihood},
journal = {Discussion Papers},
number = {614},
year = {2012},
month = {7},
abstract = {A population that can be joined at a known sequence of discrete times is sampled cross-sectionally, and the sojourn times of individuals in the sample are observed. It is well known that cross-sectioning leads to length-bias, but less well known that it may result also in dependence among the observations, which is often ignored. It is therefore important to understand and to account for this dependence when estimating the distribution of sojourn times in the population.In this paper, we study conditions under which observed sojourn times are independent and conditions under which treating observations as independent, using the product of marginals in spite of dependence, results in proper inference. The latter is known as the Composite Likelihood approach. We study parametric and nonparametric inference based on Composite Likelihood, and provide conditions for consistency, and further asymptotic properties, including normal and non-normal distributional limits of estimators. We show that Composite Likelihood leads to good estimators under certain conditions, and illustrate that it may fail without them. The theoretical study is supported by simulations. We apply the proposed methods to two data sets collected by cross-sectional designs: data on hospitalization time after bowel and hernia surgeries, and data on service times at our university.},
url = {/files/dp614.pdf},
author = {Micha Mandel, Yosef Rinott}
}
@booklet {data2012,
title = {Deludedly Agreeing to Agree},
journal = {Discussion Papers},
number = {605},
year = {2012},
month = {4},
abstract = {We study conditions relating to the impossibility of agreeing to disagree in models of interactive~KD45 belief (in contrast to models of S5 knowledge, which are used in nearly all the agreements~literature). Agreement and disagreement are studied under models of belief in three broad settings: non-probabilistic decision models, probabilistic belief revision of priors, and dynamic communication among players. We show that even when the truth axiom is not assumed it turns out that players will find it impossible to agree to disagree under fairly broad conditions.},
url = {/files/dp605.pdf}
}
@booklet {levy-adsgwnsetcoact2012,
title = {A Discounted Stochastic Game with No Stationary Equilibria: The Case of Absolutely Continuous Transitions},
journal = {Discussion Papers},
number = {612},
year = {2012},
month = {6},
abstract = {We present a discounted stochastic game with a continuum of states, finitely many players and actions, such that although all transitions are absolutely continuous w.r.t. a fixed measure, it possesses no stationary equilibria. This absolute continuity condition has been assumed in many equilibrium existence results, and the game presented here complements a recent example of ours of a game with no stationary equilibria but which possess deterministic transitions. We also show that if one allows for compact action spaces, even games with state-independent transitions need not possess stationary equilibria.},
url = {/files/dp612.pdf},
author = {Yehuda (John) Levy}
}
@booklet {levy-adsgwnsne2012,
title = {A Discounted Stochastic Game with No Stationary Nash Equilibrium},
journal = {Discussion Papers},
number = {596},
year = {2012},
month = {1},
abstract = {We present an example of a discounted stochastic game with a continuum of states, finitely many players and actions, and deterministic transitions, that possesses no measurable stationary equilibria, or even stationary approximate equilibria. The example is robust to perturbations of the payoffs, the transitions, and the discount factor, and hence gives a strong nonexistence result for stationary equilibria. The example is a game of perfect information, and hence it also does not possess stationary extensive-form correlated equilibrium. Markovian equilibria are also shown not to exist in appropriate perturbations of our example.},
url = {/files/dp596.pdf},
author = {Yehuda (John) Levy}
}
@booklet {motimichaeli-tdorpusp2012,
title = {Distribution of Revealed Preferences Under Social Pressure, The},
journal = {Discussion Papers},
number = {609},
year = {2012},
month = {5},
abstract = {This paper studies theoretically the aggregate distribution of revealed preferences when heterogeneous individuals make the trade-off between being true to their real opinions and conforming to a social norm. We show that in orthodox societies, individuals will tend to either conform fully or ignore the social norm while individuals in liberal societies will tend to compromise between the two extremes. The model sheds light on phenomena such as polarization, alienation and hypocrisy. We also show that societies with orthodox individuals will be liberal on aggregate unless the social norm is upheld by an authority. This suggests that orthodoxy cannot be maintained under pluralism.},
url = {/files/dp609.pdf},
author = {Moti Michaeli , Daniel Spiro}
}
@booklet {schreiber-aeiorr2012,
title = {An Economic Index of Relative Riskiness},
journal = {Discussion Papers},
number = {597},
year = {2012},
month = {2},
abstract = {In their seminal works, Arrow (1965) and Pratt (1964) defined two aspects of risk aversion: absolute risk aversion and relative risk aversion. Based on their definitions, we define two aspects of risk: absolute risk and relative risk. We consider situations in which, by making an investment, an agent exchanges a certain amount of wealth w by a random distributed level of wealth W. In such situations, we define absolute risk as the riskiness of a gamble that is distributed as W-w, and relative risk as the riskiness of a security that is distributed as W/w. We measure absolute risk by the Aumann and Serrano (2008) index of riskiness and relative risk by an equivalent index that we develop in this paper. The two concepts of risk do not necessarily agree on which one of two investments is riskier, and hence they capture two different aspects of risk.},
url = {/files/dp597.pdf},
author = {Amnon Schreiber}
}
@booklet {amosschurr-teopoub2012,
title = {Effect of Perspective on Unethical Behavior, The},
journal = {Discussion Papers},
number = {620},
year = {2012},
month = {8},
abstract = {In two experiments, we explored how the perspective through which individuals view their decisions influences their moral behavior. To do this we employed a computerized "Is that the answer you had in mind?" trivial-pursuit style game. The game challenges individuals{\textquoteright} integrity because cheating during play cannot be detected. Perspective, whether local or global, was manipulated: In Experiment 1 the choice procedure was used to evoke a local or an integrative perspective of one{\textquoteright}s choices, whereas in Experiment 2, perspective was manipulated through priming. Across all the experiments, we observed that when given an incentive to cheat, the adoption of a local perspective increased cheating, as evidenced by overall higher reported success rates. These findings have clear implications for explaining and controlling behavior in other situations (e.g., exercising, dieting) in which the perspective one takes is a matter of choice.},
url = {/files/dp620.pdf},
author = {Amos Schurr, Ilana Ritov, Yaakov Kareev, Judith Avrahami}
}
@booklet {hellman-agwnbae2012,
title = {A Game with No Bayesian Approximate Equilibria},
journal = {Discussion Papers},
number = {615},
year = {2012},
month = {7},
abstract = {Simon (2003) presented an example of a 3-player Bayesian game with no Bayesian equilibria but it has been an open question whether or not there are games with no approximate Bayesian equilibria. We present an example of a Bayesian game with two players, two actions and a continuum of states that possesses no approximate Bayesian equilibria, thus resolving the question. As a side benefit we also have for the first time an example of a 2-player Bayesian game with no Bayesian equilibria and an example of a strategic-form game with no approximate Nash equilibria. The construction makes use of techniques developed in an example by Y. Levy of a discounted stochastic game with no stationary equilibria.},
url = {/files/dp615.pdf},
author = {Ziv Hellman}
}
@booklet {abbamkrieger-gbafpooaoada2012,
title = {Generalized Bomber and Fighter Problems: Offline Optimal Allocation of a Discrete Asset},
journal = {Discussion Papers},
number = {625},
year = {2012},
month = {9},
publisher = {Published in Journal of Applied Probability, (2013), Vol. 50, 403-418.},
abstract = {The classical Bomber problem concerns properties of the optimal allocation policy of arsenal for an airplane equipped with a given number, n, of anti-aircraft missiles, at a distance t > 0 from its destination, which is intercepted by enemy planes appearing according to a homogeneous Poisson process. The goal is to maximize the probability of reaching its destination. The Fighter problem deals with a similar situation, but the goal is to shoot down as many enemy planes as possible. The optimal allocation policies are dynamic, depending upon the times at which the enemy is met. The present paper generalizes these problems by allowing the number of enemy planes to have any distribution, not just Poisson. This implies that the optimal strategies can no longer be dynamic, and are, in our terminology, offline. We show that properties similar to those holding for the classical problems hold also in the present case. Whether certain properties hold that remain open questions in the dynamic version are resolved in the offline version. Since {\textquoteleft}time{\textquoteright} is no longer a meaningful way to parametrize the distributions for the number of encounters, other more general orderings of distributions are needed. Numerical comparisons between the dynamic and offline approaches are given.},
url = {/files/dp625.pdf},
author = {Abba M. Krieger, Ester Samuel-Cahn}
}
@booklet {ofriraviv-hrhaptnaaiha2012,
title = {How Recent History Affects Perception: the Normative Approach and Its Heuristic Approximation},
journal = {Discussion Papers},
number = {628},
year = {2012},
month = {10},
abstract = {There is accumulating evidence that prior knowledge about expectations plays an important role in perception. The Bayesian framework is the standard computational approach to explain how prior knowledge about the distribution of expected stimuli is incorporated with noisy observations in order to improve performance. However, it is unclear what information about the prior distribution is acquired by the perceptual system over short periods of time and how this information is utilized in the process of perceptual decision making. Here we address this question using a simple two-tone discrimination task. We find that the contraction bias , in which small magnitudes are overestimated and large magnitudes are underestimated, dominates the pattern of responses of human participants. This contraction bias is consistent with the Bayesian hypothesis in which the true prior information is available to the decision-maker. However, a trial-by-trial analysis of the pattern of responses reveals that the contribution of most recent trials to performance is overweighted compared with the predictions of a standard Bayesian model. Moreover, we study participants performance in a-typical distributions of stimuli and demonstrate substantial deviations from the ideal Bayesian detector, suggesting that the brain utilizes a heuristic approximation of the Bayesian inference. We propose a biologically plausible model, in which decision in the two-tone discrimination task is based on a comparison between the second tone and an exponentially-decaying average of the first tone and past tones. We show that this model accounts for both the contraction bias and the deviations from the ideal Bayesian detector hypothesis. These findings demonstrate the power of Bayesian-like heuristics in the brain, as well as their limitations in their failure to fully adapt to novel environments.},
url = {/files/dp628.pdf},
author = {Ofri Raviv, Merav Ahissar, Yonatan Loewenstein}
}
@booklet {sergiuhart-mrwmgnaoo2012,
title = {Maximal Revenue with Multiple Goods: Nonmonotonicity and Other Observations},
journal = {Discussion Papers},
number = {630},
year = {2012},
month = {11},
publisher = {Theoretical Economics},
abstract = {Consider the problem of maximizing the revenue from selling a number of goods to a single buyer. We show that, unlike the case of one good, when the buyer{\textquoteright}s values for the goods increase the seller{\textquoteright}s maximal revenue may well decrease. We also provide a characterization of revenue-maximizing mechanisms (more generally, of "seller-favorable" mechanisms) that circumvents nondifferentiability issues. Finally, through simple and transparent examples, we clarify the need for and the use of randomization when maximizing revenue in the multiple-goods versus the one-good case.},
url = {/files/Paper},
author = {Sergiu Hart, Philip J. Reny}
}
@booklet {samuel-cahn-tnspasroecv2012,
title = {Noisy Secretary Problem and Some Results on Extreme Concomitant Variables, The},
journal = {Discussion Papers},
number = {599},
year = {2012},
month = {2},
abstract = {The classical secretary problem for selecting the best item is studied when the actual values of the items are observed with noise. One of the main appeals of the secretary problem is that the optimal strategy is able to find the best observation with the nontrivial probability of about 0.37, even when the number of observations is arbitrarily large. The results are strikingly different when the quality of the secretaries are observed with noise. If there is no noise, then the only information that is needed is whether an observation is the best among those already observed. Since observations are assumed to be i.i.d. this is distribution free. In the case of noisy data, the results are no longer distribution free. Furthermore, one needs to know the rank of the noisy observation among those already seen. Finally, the probability of finding the best secretary often goes to 0 as the number of observations, n, goes to infinity. The results depend heavily on the behavior of $p_n$, the probability that the observation that is best among the noisy observations is also best among the noiseless observations. Results involving optimal strategies if all that is available is noisy data are described and examples are given to elucidate the results.},
url = {/files/db599.pdf},
author = {Abba M. Krieger and Ester Samuel-Cahn}
}
@booklet{salomonisrael-obnvibpaua2012,
  title     = {Oxytocin, but Not Vasopressin, Increases Both Parochial and Universal Altruism},
  journal   = {Discussion Papers},
  number    = {598},
  year      = {2012},
  month     = feb,
  publisher = {Psychoneuroendocrinology (forthcoming)},
  abstract  = {In today{\textquoteright}s increasingly interconnected world, deciding with whom and at what level to cooperate becomes a matter of increasing importance as societies become more globalized and large-scale cooperation becomes a viable means of addressing global issues. This tension can play out via competition between local (e.g. within a group) and global (e.g., between groups) interests. Despite research highlighting factors influencing cooperation in such multi-layered situations, their biological basis is not well understood. In a double-blind placebo controlled study, we investigated the influence of intranasally administered oxytocin and arginine vasopressin on cooperative behavior at local and global levels. We find that oxytocin causes an increase in both the willingness to cooperate and the expectation that others will cooperate at both levels. In contrast, participants receiving vasopressin did not differ from those receiving placebo in their cooperative behavior. Our results highlight the selective role of oxytocin in intergroup cooperative behavior.},
  url       = {/files/dp598.pdf},
  author    = {Israel, Salomon and Weisel, Ori and Ebstein, Richard P. and Bornstein, Gary}
}
@booklet{edhan-pinpcte2012,
  title    = {Payoffs in Nondifferentiable Perfectly Competitive {TU} Economies},
  journal  = {Discussion Papers},
  number   = {629},
  year     = {2012},
  month    = oct,
  abstract = {We prove that a single-valued solution of perfectly competitive TU economies underlying nonatomic vector measure market games is uniquely determined as the Mertens (1988) value by four plausible value-related axioms. Since the Mertens value is always in the core of an economy, this result provides an axiomatization of the Mertens value as a core-selection. Previous works on this matter assumed the economies to be either differentiable (e.g., Dubey and Neyman (1984)) or of uniform finite type (e.g., Haimanko (2002)). This work does not assume that, thus it contributes to the axiomatic study of payoffs in perfectly competitive economies in general.},
  url      = {/files/dp629.pdf},
  author   = {Edhan, Omer}
}
@booklet{konigsberg-trpod2012,
  title    = {The Real Problem of Disagreement},
  journal  = {Discussion Papers},
  number   = {608},
  year     = {2012},
  month    = apr,
  abstract = {The problem of disagreement asks about the appropriate response (typically the response of a peer) upon encountering a disagreement between peers. The responses proposed in the literature offer different solutions to the problem, each of which has more or less normative appeal. Yet none of these seems to engage with what seems to be the real problem of disagreement. It is my aim in this paper to highlight what I think the real problem of disagreement is. It is, roughly, the problem of deciding whether a revisionary tactic is appropriate following the discovery of disagreement as well as deciding which revisionary tactic is appropriate. This, I will show, is a slippery and inevitable problem that any discussion of disagreement ought to deal with.},
  url      = {/files/dp608.pdf},
  author   = {Konigsberg, Amir}
}
@booklet{edhan-roppolv2012,
  title    = {Representations of Positive Projections on Lipschitz Vector},
  journal  = {Discussion Papers},
  number   = {624},
  year     = {2012},
  month    = aug,
  abstract = {Among the single-valued solution concepts studied in cooperative game theory and economics, those which are also positive projections play an important role. The value, semivalues, and quasivalues of a cooperative game are several examples of solution concepts which are positive projections. These solution concepts are known to have many important applications in economics. In many applications the specific positive projection discussed is represented as an expectation of marginal contributions of agents to "random" coalitions. Usually these representations are used to characterize positive projections obeying certain additional axioms. It is thus of interest to study the representation theory of positive projections and its relation with some common axioms. We study positive projections defined over certain spaces of nonatomic Lipschitz vector measure games. To this end, we develop a general notion of "calculus" for such games, which in a manner extends the notion of the Radon-Nikodym derivative for measures. We prove several representation results for positive projections, which essentially state that the image of a game under the action of a positive projection can be represented as an averaging of its derivative w.r.t. some vector measure. We then introduce a specific calculus for the space {$\mathcal{CON}$} generated by concave, monotonically nondecreasing, and Lipschitz continuous functions of finitely many nonatomic probability measures. We study in detail the properties of the resulting representations of positive projections on {$\mathcal{CON}$} and especially those of values on {$\mathcal{CON}$}. The latter results are of great importance in various applications in economics.},
  url      = {/files/dp624.pdf},
  author   = {Edhan, Omer}
}
@booklet{einavhart-rorciagvabw2012,
  title    = {Reversal of Risky Choice in a Good Versus a Bad World},
  journal  = {Discussion Papers},
  number   = {619},
  year     = {2012},
  month    = aug,
  abstract = {In many situations one has to choose between risky alternatives, knowing only one{\textquoteright}s past experience with those alternatives. Such decisions can be made in more - or less - benevolent settings or {\textquoteright}worlds{\textquoteright}. In a {\textquoteright}good world{\textquoteright}, high payoffs are more frequent than low payoffs, and vice versa in a {\textquoteright}bad world{\textquoteright}. In two studies, we explored whether the world influences choice behavior: Whether people behave differently in a {\textquoteright}good{\textquoteright} versus a {\textquoteright}bad{\textquoteright} world. Subjects made repeated, incentivized choices between two gambles, one riskier than the other, neither offering a sure amount. The gambles were held equivalent in terms of their expected value, differing only in variance. Worlds were manipulated both between- and within-subject: In Study 1, each subject experienced one world - good, bad or mediocre; in Study 2, each subject experienced both a good and a bad world. We examine the aggregate pattern of behavior (average choice frequencies), and the dynamics of behavior across time. We observed significant differences in the aggregate pattern: In a good world, subjects tended to choose the riskier alternative, and vice versa in a bad world. The pattern of the dynamics, i.e., the transitions from round to round, were best explained by a reaction to the counterfactual reward: When the unchosen alternative yielded a better payoff, the tendency to subsequently choose it was higher. We compared these two patterns to the predictions of three types of models: Reinforcement learning, regret-based and disappointment-based models. Behavior was in line only with the predictions of regret-based models.},
  url      = {/files/dp619.pdf},
  author   = {Hart, Einav and Kareev, Yaakov and Avrahami, Judith}
}
@booklet{michaeli-rfsog2012,
  title    = {Riskiness for Sets of Gambles},
  journal  = {Discussion Papers},
  number   = {603},
  year     = {2012},
  month    = mar,
  abstract = {Aumann{\textendash}Serrano (2008) and Foster{\textendash}Hart (2009) suggest two new riskiness measures, each of which enables one to elicit a complete and objective ranking of gambles according to their riskiness. Hart (2011) shows that both measures can be obtained by looking at a large set of utility functions and applying "uniform rejection criteria" to rank the gambles in accordance with this set of utilities. We use the same "uniform rejection criteria" to extend these two riskiness measures to the realm of uncertainty and develop complete and objective rankings of sets of gambles, which arise naturally in models of decision making under uncertainty.},
  url      = {/files/dp603.pdf},
  author   = {Michaeli, Moti}
}
@booklet{hananshteingart-trofiiol2012,
  title    = {The Role of First Impression in Operant Learning},
  journal  = {Discussion Papers},
  number   = {626},
  year     = {2012},
  month    = sep,
  abstract = {We quantified the effect of first experience on behavior in operant learning and studied its underlying computational principles. To that goal, we analyzed more than 200,000 choices in a repeated-choice experiment. We found that the outcome of the first experience has a substantial and lasting effect on participants{\textquoteright} subsequent behavior, which we term outcome primacy. We found that this outcome primacy can account for much of the underweighting of rare events, where participants apparently underestimate small probabilities. We modeled behavior in this task using a standard, model-free reinforcement learning algorithm. In this model, the values of the different actions are learned over time and are used to determine the next action according to a predefined action-selection rule. We used a novel non-parametric method to characterize this action-selection rule and showed that the substantial effect of first experience on behavior is consistent with the reinforcement learning model if we assume that the outcome of first experience resets the values of the experienced actions, but not if we assume arbitrary initial conditions. Moreover, the predictive power of our resetting model outperforms previously published models regarding the aggregate choice behavior. These findings suggest that first experience has a disproportionately large effect on subsequent actions, similar to primacy effects in other fields of cognitive psychology. The mechanism of resetting of the initial conditions which underlies outcome primacy may thus also account for other forms of primacy.},
  url      = {/files/dp626.pdf},
  author   = {Shteingart, Hanan and Loewenstein, Yonatan}
}
@booklet{mikelalvarez-mozos-svfcg2012,
  title     = {Spectrum Value for Coalitional Games},
  journal   = {Discussion Papers},
  number    = {618},
  year      = {2012},
  month     = aug,
  publisher = {Forthcoming in GEB},
  abstract  = {Assuming a {\textquoteleft}spectrum{\textquoteright} or ordering on the players of a coalitional game, as in a political spectrum in a parliamentary situation, we consider a variation of the Shapley value in which coalitions may only be formed if they are connected with respect to the spectrum. This results in a naturally asymmetric power index in which positioning along the spectrum is critical. We present both a characterisation of this value by means of properties and combinatoric formulae for calculating it. In simple majority games, the greatest power accrues to {\textquoteleft}moderate{\textquoteright} players who are located neither at the extremes of the spectrum nor in its centre. In supermajority games, power increasingly accrues towards the extremes, and in unanimity games all power is held by the players at the extreme of the spectrum.},
  url       = {/files/dp618.pdf},
  author    = {Alvarez-Mozos, Mikel and Hellman, Ziv and Winter, Eyal}
}
@booklet{amosschurr-ttbprcirpt2012,
  title    = {Taking the Broad Perspective: Risky Choices in Repeated Proficiency Tasks},
  journal  = {Discussion Papers},
  number   = {621},
  year     = {2012},
  month    = sep,
  abstract = {In performing skill-based tasks individuals often face a choice between easier, less demanding alternatives, but ones whose expected payoffs in case of success are lower, and difficult, more demanding alternatives whose expected payoffs in case of success are higher: What piece to play in a musical competition, whether to operate a camera in a manual or automatic mode, etc. We maintain that the decision-maker{\textquoteright}s perspective - whether narrow or broad - is one determinant of choice, and subsequent satisfaction, in such tasks. In two experiments involving dart throwing and answering general-knowledge trivia questions, perspective was manipulated through choice procedure: A sequential choice procedure, with task difficulty chosen one at a time, was used to induce a narrow perspective while an aggregate-choice procedure was used to induce a broad perspective. In two additional experiments, both involving a sequential-choice procedure perspective was manipulated through priming. As predicted, in all experiments inducement of a narrow perspective resulted in a higher probability of choosing the more difficult task; it also led to lower-than-anticipated overall satisfaction.},
  url      = {/files/dp621.pdf},
  author   = {Schurr, Amos and Kareev, Yaakov and Avrahami, Judith and Ritov, Ilana}
}
@booklet{bavly-uittd2012,
  title    = {Uncertainty in the Traveler{\textquoteright}s Dilemma},
  journal  = {Discussion Papers},
  number   = {595},
  year     = {2012},
  month    = jan,
  abstract = {The paper analyzes a perturbation on the players{\textquoteright} knowledge of the game in the traveler{\textquoteright}s dilemma, by introducing some uncertainty about the range of admissible actions. The ratio between changes in the outcomes and the size of perturbation is shown to grow exponentially in the range of the given game. This is consistent with the intuition that a wider range makes the outcome of the traveler{\textquoteright}s dilemma more paradoxical. We compare this with the growth of the elasticity index (Bavly (2011)) of this game.},
  url      = {/files/dp595.pdf},
  author   = {Bavly, Gilad}
}
@booklet{edhan-voemg2012,
  title    = {Values of Exact Market Games},
  journal  = {Discussion Papers},
  number   = {627},
  year     = {2012},
  month    = sep,
  abstract = {We prove that a single-valued solution of perfectly competitive TU economies underlying nonatomic exact market games is uniquely determined as the Mertens value by four plausible value-related axioms. Since the Mertens value is always a core element, this result provides an axiomatization of the Mertens value as a core-selection. Previous works in this direction assumed the economies to be either differentiable (e.g., Dubey and Neyman [9]) or of uniform finite-type (e.g., Haimanko [14]). Our work does not assume that, thus it contributes to the axiomatic study of payoffs in perfectly competitive economies (or values of their derived market games) in general. In fact, this is the first contribution in this direction.},
  url      = {/files/dp627R.pdf},
  author   = {Edhan, Omer}
}
@booklet{edhan-vonvmg2012,
  title    = {Values of Nondifferentiable Vector Measure Games},
  journal  = {Discussion Papers},
  number   = {602},
  year     = {2012},
  month    = mar,
  abstract = {We introduce ideas and methods from distribution theory into value theory. This novel approach enables us to construct new diagonal formulas for the Mertens value and the Neyman value on a large space of non-differentiable games. This in turn enables us to give an affirmative answer to the question, first posed by Neyman, whether the Mertens value and the Neyman value coincide "modulo Banach limits"? The solution is an intermediate result towards a characterization of values of norm 1 of vector measure games with bounded variation.},
  url      = {/files/dp602.pdf},
  author   = {Edhan, Omer}
}
@booklet{ilanyaniv-wgwapwsibtgyoouptia2012,
  title     = {When Guessing What Another Person Would Say Is Better Than Giving Your Own Opinion: Using Perspective-Taking to Improve Advice-Taking},
  journal   = {Discussion Papers},
  number    = {622},
  year      = {2012},
  month     = aug,
  publisher = {Journal of Experimental Social Psychology 48 (2012) 1022--1028},
  abstract  = {We investigated how perspective-taking might be used to overcome bias and improve advice-based judgments. Decision makers often tend to underweight the opinions of others relative to their own, and thus fail to exploit the wisdom of others. We tested the idea that decision makers taking the perspective of another person engage a less egocentric mode of processing of advisory opinions and thereby improve their accuracy. In Studies 1-2, participants gave their initial opinions and then considered a sample of advisory opinions in two conditions. In one condition (self-perspective), they were asked to give their best advice-based estimates. In the second (other-perspective), they were asked to give advice-based estimates from the perspective of another judge. The dependent variables were the participants{\textquoteright} accuracy and indices that traced their judgment policy. In the self-perspective condition participants adhered to their initial opinions, whereas in the other-perspective condition they were far less egocentric, weighted the available opinions more equally and produced more accurate estimates. In Study 3, initial estimates were not elicited, yet the data patterns were consistent with these conclusions. All the studies suggest that switching perspectives allows decision makers to generate advice-based judgments that are superior to those they would otherwise have produced. We discuss the merits of perspective-taking as a procedure for correcting bias, suggesting that it is theoretically justifiable, practicable, and effective.},
  url       = {/files/dp622.pdf},
  author    = {Yaniv, Ilan and Choshen-Hillel, Shoham}
}
@booklet{yaniv-aatcospbiaapb2011,
  title     = {Agency and the Construction of Social Preference: Between Inequality Aversion and Prosocial Behavior},
  journal   = {Discussion Papers},
  number    = {573},
  year      = {2011},
  month     = may,
  publisher = {Forthcoming in Journal of Personality and Social Psychology},
  abstract  = {The term social preference refers to decision makers{\textquoteright} satisfaction with their own outcomes and those attained by comparable others. The present research was inspired by what appears to be a discrepancy in the literature on social preferences{\textemdash}specifically, between a class of studies demonstrating people{\textquoteright}s concern with inequality and others documenting their motivation to increase social welfare. We propose a theoretical framework to account for this puzzling difference. In particular, we argue that a characteristic of the decision setting{\textemdash}an individual{\textquoteright}s role in creating the outcomes, referred to as agency{\textemdash}critically affects decision makers{\textquoteright} weighting of opposing social motives. Namely, in settings where people can merely judge the outcomes, but cannot affect them ({\textquotedblleft}low agency{\textquotedblright}), their concern with inequality figures prominently. In contrast, in settings where people determine the outcomes for themselves and others ({\textquotedblleft}high agency{\textquotedblright}), their concern with the welfare of others is prominent. Three studies employing a new salary-allocation paradigm document a robust effect of agency. In the high-agency condition participants had to assign salaries, while in the low-agency condition they indicated their satisfaction with equivalent predetermined salaries. We found that compared with low-agency participants, high-agency participants were less concerned with disadvantageous salary allocations and were even willing to sacrifice a portion of their pay to better others{\textquoteright} outcomes. The effects of agency are discussed in connection to inequality aversion, social comparison, prosocial behavior, and preference construction.},
  url       = {/files/dp573.pdf},
  author    = {Choshen-Hillel, Shoham and Yaniv, Ilan}
}
@booklet{nehama-aja2011,
  title     = {Approximate Judgement Aggregation},
  journal   = {Discussion Papers},
  number    = {574},
  year      = {2011},
  month     = jan,
  publisher = {Published in WINE 2011, pp. 302--313, and Annals of Mathematics \& Artificial Intelligence, Special Issue on Algorithms, Approximation, and Empirical Studies in Behavioral and Computational Social Choice, Volume 68, Issue 1--3, pp. 91--134},
  abstract  = {In this paper we analyze judgement aggregation problems in which a group of agents independently votes on a set of complex propositions that has some interdependency constraint between them (e.g., transitivity when describing preferences). We consider the issue of judgement aggregation from the perspective of approximation. That is, we generalize the previous results by studying approximate judgement aggregation. We relax the main two constraints assumed in the current literature, Consistency and Independence and consider mechanisms that only approximately satisfy these constraints, that is, satisfy them up to a small portion of the inputs. The main question we raise is whether the relaxation of these notions significantly alters the class of satisfying aggregation mechanisms. The recent works for preference aggregation of Kalai, Mossel, and Keller fit into this framework. The main result of this paper is that, as in the case of preference aggregation, in the case of a subclass of a natural class of aggregation problems termed {\textquoteleft}truth-functional agendas{\textquoteright}, the set of satisfying aggregation mechanisms does not extend non-trivially when relaxing the constraints. Our proof techniques involve Boolean Fourier transform and analysis of voter influences for voting protocols. The question we raise for Approximate Aggregation can be stated in terms of Property Testing. For instance, as a corollary from our result we get a generalization of the classic result for property testing of linearity of Boolean functions.},
  url       = {/files/Published version - Annals of Mathematics and Artificial Intelligence},
  author    = {Nehama, Ilan}
}
@booklet{babichenko-atateb2011,
  title    = {Average Testing and the Efficient Boundary},
  journal  = {Discussion Papers},
  number   = {567},
  year     = {2011},
  month    = feb,
  abstract = {We propose a simple adaptive procedure for playing strategic games: average testing. In this procedure each player sticks to her current strategy if it yields a payoff that exceeds her average payoff by at least some fixed {$\epsilon > 0$}; otherwise she chooses a strategy at random. We consider generic two-person games where both players play according to the average testing procedure on blocks of k-periods. We demonstrate that for all k large enough, the pair of time-average payoffs converges (almost surely) to the {$3\epsilon$}-Pareto efficient boundary.},
  url      = {/files/dp567.pdf},
  author   = {Arieli, Itai and Babichenko, Yakov}
}
@booklet{mayabar-hillel-beatlh2011,
  title     = {Behavioral Economics and the Law (in {Hebrew})},
  journal   = {Discussion Papers},
  number    = {582},
  year      = {2011},
  month     = jul,
  publisher = {{\texttimes}{\textquotedblright}{\texttimes}{\textquoteright}{\texttimes}{\texttimes}\copyright{\texttimes}{\textquotedblright} {\texttimes}{\textquotedblright}{\texttimes}{\guilsinglright}{\texttimes}{\texttimes}{\guilsinglright}{\texttimes}{\texttimes}{\texttimes}{\textordfeminine} {\texttimes}{\texttimes}\v z{\texttimes}\copyright{\texttimes}{\texttimes}\~{} {\texttimes}{\textquoteleft}{\texttimes}{\textcent}{\texttimes}{\textasciidieresis}{\texttimes}{\texttimes}{\guilsinglright}{\texttimes}{\textordfeminine} {\texttimes}{\texttimes}*{\texttimes}{\textasciidieresis}{\texttimes}{\texttimes}{\texttimes} {\texttimes}{\texttimes}{\textasciidieresis}{\texttimes}*{\texttimes}\S{\texttimes}{\textbrokenbar}{\textquoteright}{\texttimes}{\texttimes}{\textquotedblright}. {\texttimes}\v z{\texttimes}{\guilsinglright}{\texttimes}*{\texttimes}{\"Y} {\texttimes}{\textexclamdown}{\texttimes}{\texttimes}\S{\texttimes}{\textasciidieresis} {\texttimes}{\texttimes}\v z{\texttimes}{\textendash}-{\texttimes}\S{\texttimes}{\textasciidieresis}{\texttimes} {\texttimes}{\textendash}-{\texttimes}\S{\texttimes}{\texttimes}\S{\texttimes}{\textquotedblright} {\texttimes}*{\texttimes}\v z{\texttimes}\copyr},
  url       = {/files/dp582.pdf},
  author    = {Bar-Hillel, Maya and Procaccia, Uriel}
}
@booklet{motyamar-bnalmp2011,
  title    = {Brand Names Act Like Marketing Placebos},
  journal  = {Discussion Papers},
  number   = {566},
  year     = {2011},
  month    = feb,
  abstract = {This research illustrates the power of reputation, such as that embodied in brand names, demonstrating that names can enhance objective product efficacy. Study participants facing a glaring light were asked to read printed words as accurately and as quickly as they could, receiving compensation proportional to their performance. Those wearing sunglasses tagged Ray-Ban made fewer errors, yet read more quickly, than those wearing the identical pair of sunglasses when tagged Mango (a less prestigious brand). Similarly, ear-muffs blocked noise more effectively, and chamomile tea improved mental focus more, when otherwise identical target products carried more reputable names.},
  url      = {/files/dp566.pdf},
  author   = {Amar, Moty and Ariely, Dan and Bar-Hillel, Maya and Carmon, Ziv and Ofir, Chezy}
}
@booklet{winter-tcfdswicmdds2011,
  title    = {The Case for Discriminatory Sentencing: Why Identical Crimes May Deserve Different Sanctions},
  journal  = {Discussion Papers},
  number   = {569},
  year     = {2011},
  month    = mar,
  abstract = {The traditional premise of criminal law is that criminals who are convicted of similar crimes under similar circumstances ought to be subject to identical sentences. This article provides an efficiency-based rationale for discriminatory sentencing, i.e., establishes circumstances under which identical crimes ought to be subject to differential sentencing. We also establish the relevance of this finding to the practices of sentencing and, in particular, to the Sentencing Guidelines. Most significantly, we establish that the model can explain why celebrities, leaders, or recidivists ought to be subject to harsher sanctions than others. Discriminatory sentencing is optimal when criminals confer positive externalities on each other. If a criminal A who imposes (non-reciprocal) large positive externalities on criminal B is punished sufficiently harshly, B would expect A not to commit the crime and consequently, he would expect not to benefit from the positive externalities conferred on him by A. Given that B{\textquoteright}s expected benefits are lower, the sanctions sufficient to deter B are also lower than the ones imposed on A. The result can be easily extended to the case of reciprocal externalities. Assume that a criminal A imposes positive externalities on B and B imposes identical positive externalities on A. If A is subject to a sufficiently harsh sanction and B knows this, B would expect A not to perform the crime and therefore would expect not to benefit from the positive externalities otherwise conferred on B. Consequently, a more lenient sanction than the sanction imposed on A would be sufficient to deter B.},
  url      = {/files/dp569.pdf},
  author   = {Harel, Alon and Winter, Eyal}
}
@booklet{ullmann-margalit-c2011,
  title     = {Considerateness},
  journal   = {Discussion Papers},
  number    = {584},
  year      = {2011},
  month     = jul,
  publisher = {Iyyun, The Jerusalem Philosophical Quarterly 60 (July 2011): 205--244},
  abstract  = {A stranger entering the store ahead of you may hold the door open so it does not slam in your face, or your daughter may tidy up the kitchen when she realizes that you are very tired: both act out of considerateness. In acting considerately one takes others into consideration. The considerate act aims at contributing to the wellbeing of somebody else at a low cost to oneself. Focusing on the extreme poles of the spectrum of human relationships, I argue that considerateness is the foundation upon which our relationships are to be organized in both the thin, anonymous context of the public space and the thick, intimate context of the family. The first part of the paper, sections I--III, explores the idea that considerateness is the minimum that we owe to one another in the public space. By acting considerately toward strangers we show respect to that which we share as people, namely, to our common humanity. The second part, sections IV--VIII, explores the idea that the family is constituted on a foundation of considerateness. Referring to the particular distribution of domestic burdens and benefits adopted by each family as its family deal, I argue that the considerate family deal embodies a distinct, family-oriented notion of fairness. The third part, sections IX--XV, takes up the notion of family fairness, contrasting it with justice. In particular I take issue with Susan Okin{\textquoteright}s notion of the just family. Driving a wedge between justice and fairness, I propose an idea of family fairness that is partial and sympathetic rather than impartial and empathic, particular and internal rather than generalizable, and based on ongoing comparisons of preferences among family members. I conclude by characterizing the good family as the not-unjust family that is considerate and fair.},
  url       = {/files/dp584.pdf},
  author    = {Ullmann-Margalit, Edna}
}
@booklet{peretz-ctbrs2011,
  title    = {Correlation Through Bounded Recall Strategies},
  journal  = {Discussion Papers},
  number   = {579},
  year     = {2011},
  month    = jul,
  abstract = {Two agents independently choose mixed m-recall strategies that take actions in finite action spaces {$A_1$} and {$A_2$}. The strategies induce a random play, {$a_1, a_2, \ldots$}, where {$a_t$} assumes values in {$A_1 \times A_2$}. An M-recall observer observes the play. The goal of the agents is to make the observer believe that the play is similar to a sequence of i.i.d. random actions whose distribution is {$Q \in \Delta(A_1 \times A_2)$}. For nearly every t, the following event should occur with probability close to one: "the distribution of {$a_{t+M}$} given {$a_t, \ldots, a_{t+M}$} is close to Q." We provide a sufficient and necessary condition on m, M, and Q under which this goal can be achieved (for large m). This work is a step in the direction of establishing a folk theorem for repeated games with bounded recall. It tries to tackle the difficulty in computing the individually rational levels (IRL) in the bounded recall setting. Our result implies, for example, that in some games the IRL in the bounded recall game is bounded away below the IRL in the stage game, even when all the players have the same recall capacity.},
  url      = {/files/dp579.pdf},
  author   = {Peretz, Ron}
}
@booklet{amitavchakravarti-dartcteoetnc2011,
  title     = {Detecting and Reacting to Change: The Effect of Exposure to Narrow Categorizations},
  journal   = {Discussion Papers},
  number    = {588},
  year      = {2011},
  month     = aug,
  publisher = {Journal of Experimental Psychology: Learning, Memory, and Cognition},
  abstract  = {The ability to detect a change, to accurately assess the magnitude of the change, and to react to that change in a commensurate fashion are of critical importance in many decision domains. Thus, it is important to understand the factors that systematically affect people{\textquoteright}s reactions to change. In this article we document a novel effect: Decision makers{\textquoteright} reactions to a change (e.g., a visual change, a technology change) were systematically affected by the type of categorizations they encountered in an unrelated prior task (e.g., the response categories associated with a survey question). We found that prior exposure to narrow, as opposed to broad, categorizations improved decision makers{\textquoteright} ability to detect change and led to stronger reactions to a given change. These differential reactions occurred because the prior categorizations, even though unrelated, altered the extent to which the subsequently presented change was perceived as either a relatively large change or a relatively small one.},
  url       = {/files/dp588.pdf},
  author    = {Chakravarti, Amitav and Fang, Christina and Shapira, Zur}
}
@booklet{noambar-shai-dsbctf2011,
  title    = {Do Solitary Bees Count to Five?},
  journal  = {Discussion Papers},
  number   = {572},
  year     = {2011},
  month    = may,
  abstract = {Efficient foragers avoid returning to food sources that they had previously depleted. Bombus terrestris bumblebees use a counting-like strategy to leave Alcea setosa flowers just after visiting all of their five nectaries. We tested whether a similar strategy is employed by solitary Eucera sp. bees that also forage on A. setosa. Analyses of 261 video-recorded flower visits showed that the bees most commonly probed five nectaries, but occasionally (in 7.8\% of visits) continued to a nectary they had already visited. Probing durations that preceded flower departures were generally shorter than probings that were followed by an additional nectary visit in the same flower. Assuming that probing durations correlate with nectar volumes, this suggests that flower departure frequencies increased after probing of low-rewarding nectaries. The flowers{\textquoteright} spatial attributes were not used as departure cues, but the bees may have left flowers in response to scent marks on previously visited nectaries. We conclude that Eucera females do not exhibit numerical competence as a mechanism for efficient patch use, but rather a combination of a reward-based leaving rule and scent-marking. The bees{\textquoteright} foraging pattern is compatible with Waage{\textquoteright}s (1979, Journal of Animal Ecology, 48, 353--371) patch departure rule, which states that the tendency to leave a foraging patch increases with time, and decreases when food items are encountered. Thus, Eucera resemble bumblebees in avoiding most revisits to already-visited nectaries, but use a different foraging strategy to do so. This difference may reflect lower learning capabilities of solitary bee species compared to social ones.},
  url      = {/files/dp572.pdf},
  author   = {Bar-Shai, Noam and Keasar, Tamar and Shmida, Avi}
}
@booklet {bavly-eog2011,
title = {Elasticity of Games},
journal = {Discussion Papers},
number = {592},
year = {2011},
month = {12},
abstract = {We develop an elasticity index of a strategic game. The index measures the robustness of the set of rational outcomes of a game. The elasticity index of a game is the maximal ratio between the change of the rational outcomes and the size of an infinitesimal perturbation. The perturbation is on the players{\textquoteright} knowledge of the game.The elasticity of a strategic game is a nonnegative number. A small elasticity is indicative of the robustness of the rational outcomes (for example, if there is only one player the elasticity is 0), and a large elasticity is indicative of non-robustness. For example, the elasticity of the (normalized) n-stage finitely repeated prisoner{\textquoteright}s dilemma is at least exponential in n, as is the elasticity of the n-stage centipede game and the n-ranged traveler{\textquoteright}s dilemma. The concept of elasticity enables us to look from a different perspective at Neyman{\textquoteright}s (1999) repeated games when the number of repetitions is not commonly known, and Aumann{\textquoteright}s (1992) demonstration of the effect of irrationality perturbations.},
url = {/files/dp592.pdf},
author = {Bavly, Gilad}
}
@booklet {shapira-efmorcipapocaivc2011,
title = {Entrepreneurial Finance Meets Organizational Reality: Comparing Investment Practices And Performance Of Corporate And Independent Venture Capitalists},
journal = {Discussion Papers},
number = {589},
year = {2011},
month = {8},
publisher = {Strategic Management Journal, 31: 990{\textendash}1017 (2010)},
abstract = {This paper investigates the effect of compensation of corporate personnel on their investment in new technologies. We focus on a specific corporate activity, namely corporate venture capital (CVC), describing minority equity investment by established-firms in entrepreneurial ventures. The setting offers an opportunity to compare corporate investors to investment experts, the independent venture capitalists (IVCs). On average, we observe a performance gap between corporate investors and their independent counterparts. Interestingly, the performance gap is sensitive to CVCs{\textquoteright} compensation scheme: it is the largest when CVC personnel are awarded performance pay. Not only do we study the association between incentives and performance but we also document a direct relationship between incentives and the actions managers undertake. For example, we observe disparity between the number of participants in venture capital syndicates that involve a corporate investor, and those that consist solely of IVCs. The disparity shrinks substantially, however, for a subset of CVCs that compensate their personnel using performance pay. We find a parallel pattern when analyzing the relationship between compensation and another investment practice, staging of investment. To conclude, the paper investigates the three elements of the principal-agent framework, thus providing direct evidence that compensation schemes (incentives) shape investment practices (managerial action), and ultimately investors{\textquoteright} outcome (performance).},
url = {/files/dp589.pdf},
author = {Dushnitsky, Gary and Shapira, Zur}
}
@booklet {konigsberg-epdabr2011,
title = {Epistemic Peerage, Disagreement, and Belief Revision},
journal = {Discussion Papers},
number = {583},
year = {2011},
month = {7},
abstract = {Recent debates have centred on the normative influence epistemic peerage should have on the regulation of beliefs in cases of disagreement. A dominant position in this debate is that acknowledging an epistemic peer{\textquoteright}s possession of a belief contrary to one{\textquoteright}s own ought, in itself, to lead to the revision of one{\textquoteright}s doxastic commitments. In what follows I aim to challenge and rethink the notion of peerage underlying the disagreement debate and thus reveal that the traditional view of peerage rests upon an idealized conception of similarity between disagreeing parties, and thus to show that the normative constraints derived from it are equally idealized. Constructively, I will suggest a commonsensical solution to the disagreement problem based on what I propose as a soft, more moderate conception of peerage.},
url = {/files/dp583.pdf},
author = {Konigsberg, Amir}
}
@booklet {tomjdejong-oteopatdbparoipv2011,
title = {On the Evolution of Protandry and the Distinction Between Preference and Rank Order in Pollinator Visitation},
journal = {Discussion Papers},
number = {578},
year = {2011},
month = {7},
abstract = {We develop a measure for quantifying rank order of visitation in complex sequences of male-phase versus female-phase flowers. The measure shows whether female flowers are visited before male flowers which enhances plant fitness. We apply the new method to bumble bee visitation in Digitalis purpurea and Echium vulgare and discuss our results in relation to the evolution of protandry in insect pollinated plant species.},
url = {/files/dp578.pdf},
author = {de Jong, Tom J and Klinkhamer, Peter GL and Shmida, Avi and Thuijsman, Frank}
}
@booklet {itzhakvenezia-fsamhbpaaiateomv2011,
title = {Firm Specific and Macro Herding by Professional and Amateur Investors and Their Effects on Market Volatility},
journal = {Discussion Papers},
number = {586},
year = {2011},
month = {8},
publisher = {Journal of Banking \& Finance 35 (2011) 1599{\textendash}1609},
abstract = {We find a herding tendency among both amateur and professional investors and conclude that the propensity to herd is lower in the professionals. These results are obtained both when we consider herding into individual stocks and herding into stocks in general. Herding depends on the firm{\textquoteright}s systematic risk and size, and the professionals are less sensitive to these variables. The~differences between the amateurs and the professionals may be attributable to the latter{\textquoteright}s superior financial training. Most of the results are consistent with the theory that herding is information-based. We also find that the herding behavior of the two groups is a persistent phenomenon, and that it is positively and significantly correlated with stock market returns{\textquoteright} volatility. Finally, herding, mainly by amateurs, causes market volatility in the Granger causality sense.},
url = {/files/dp586.pdf},
author = {Venezia, Itzhak and Nashikkar, Amrut and Shapira, Zur}
}
@booklet {reny-iorfmasaaanc2011,
title = {Implementation of Reduced Form Mechanisms: A Simple Approach and a New Characterization},
journal = {Discussion Papers},
number = {594},
year = {2011},
month = {12},
abstract = {We provide a new characterization of implementability of reduced form mechanisms in terms of straightforward second-order stochastic dominance. In addition, we present a simple proof of Matthews{\textquoteright} (1984) conjecture, proved by Border (1991), on implementability.},
url = {http://www.ma.huji.ac.il/hart/abs/q-mech.html},
author = {Hart, Sergiu and Reny, Philip J.}
}
@booklet {bar-hillel-lllpeicaspo2011,
title = {Location, Location, Location: Position Effects in Choice Among Simultaneously Presented Options},
journal = {Discussion Papers},
number = {580},
year = {2011},
month = {7},
publisher = {Published In: Brun, W., Keren, G., Kirkeboen, G., \& Montgomery, H. (2011). Perspectives on Thinking, Judging, and Decision Making. Oslo: Universitetsforlaget. Chapter 19},
abstract = {Since its inception, psychology has studied position effects. But the position was a temporal one in sequential presentation, and the dependent variables related to memory and learning. This paper attempts to survey position effects when position is spatial (namely},
url = {/files/db580.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {linial-msiq2011,
title = {Market Share Indicates Quality},
journal = {Discussion Papers},
number = {590},
year = {2011},
month = {10},
abstract = {Market share and quality, or customer satisfaction, go hand in hand. Yet the inference that higher market share indicates higher quality is seldom made. The skepticism is in part fueled by elitism, the association of mass popularity with lower quality, and by cynicism, ascribing market leadership to an entrenched position. We find that though such skepticism is often justified, it is correct to make a Bayesian inference that the product with the higher market share has the better quality under rather tame assumptions.},
url = {/files/db590R.pdf},
author = {Ban, Amir and Linial, Nati}
}
@booklet {biermann-amtcmimm2011,
title = {A Measure to Compare Matchings in Marriage Markets},
journal = {Discussion Papers},
number = {575},
year = {2011},
month = {6},
abstract = {In matching markets the number of blocking pairs is often used as a criterion to compare matchings. We argue that this criterion is lacking an economic interpretation: In many circumstances it will neither reflect the expected extent of partner changes, nor will it capture the satisfaction of the players with the matching. As an alternative, we set up two principles which single out a particularly "disruptive" subcollection of blocking pairs. We propose to take the cardinality of that subset as a measure to compare matchings. This cardinality has an economic interpretation: The subset is a justified objection against the given matching according to a bargaining set characterization of the set of stable matchings. We prove multiple properties relevant for a workable measure of comparison.},
url = {/files/dp575.pdf},
author = {Biermann, Florian M.}
}
@booklet {zamir-meiafa2011,
title = {Multiple Equilibria in Asymmetric First-Price Auctions},
journal = {Discussion Papers},
number = {591},
year = {2011},
month = {11},
abstract = {Maskin and Riley (2003) and Lebrun (2006) prove that the Bayes-Nash equilibrium of first-price auctions is unique. This uniqueness requires the assumption that a buyer never bids above his value. We demonstrate that, in asymmetric first-price auctions (with or without a minimum bid), the relaxation of this assumption results in additional equilibria that are "substantial." Although in each of these additional equilibria no buyer wins with a bid above his value, the allocation of the object and the selling price may vary among the equilibria. Furthermore, we show that such phenomena can only occur under asymmetry in the distributions of values.},
url = {/files/db591.pdf},
author = {Kaplan, Todd R. and Zamir, Shmuel}
}
@booklet {bar-hillel-tnu2011,
title = {The New Unconscious},
journal = {Discussion Papers},
number = {570},
year = {2011},
month = {3},
publisher = {The Law \& Business Journal (IDC, Israel) , Issue 12 (September 2010), P. 13-40},
abstract = {Recent research in psychology, especially that called "The New Unconscious", is discovering strange and unintuitive phenomena, some of which raise interesting challenges for the law. This paper discusses some of these challenges. For example, if much of our mental life occurs out of our awareness and control, and yet is subject to easy external manipulation, what implications does this have for holding defendants responsible for their deeds? For that matter, what implications does this have for trusting judges to judge and act as they should, and would, if their own mental processes were fully conscious and controlled? Some provocative ideas are suggested, such as how to make prison terms shorter and more deterring at the same time; assisting judges in overcoming inconsistency and biases; etc.},
url = {/files/db570.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {erandayan-ntnimpifo2011,
title = {Nudge to Nobesity II: Menu Positions Influence Food Orders},
journal = {Discussion Papers},
number = {581},
year = {2011},
month = {7},
publisher = {Judgment and Decision Making, 6(4), June 2011, Pp. 333-342},
abstract = {"Very small but cumulated decreases in food intake may be sufficient to have significant effects, even erasing obesity over a period of years" (Rozin et al., 2011). In two studies, one a lab study and the other a real-world study, we examine the effect of manipulating the position of different foods on a restaurant menu. Items placed at the beginning or the end of the list of their category options were up to twice as popular as when they were placed in the center of the list. Given this effect, placing healthier menu items at the top or bottom of item lists and less healthy ones in their center (e.g., sugared drinks vs. calorie-free drinks) should result in some increase in favor of healthier food choices.},
url = {/files/db581.pdf},
author = {Dayan, Eran and Bar-Hillel, Maya}
}
@booklet {davidazriel-ooaibrtiadrn2011,
title = {On Optimal Allocation in Binary Response Trials; Is Adaptive Design Really Necessary?},
journal = {Discussion Papers},
number = {568},
year = {2011},
month = {3},
abstract = {We consider the classical problem of selecting the best of two treatments in clinical trials~with binary response. The target is to find the design that maximizes the power of the relevant~test. Many papers use a normal approximation to the power function and claim that Neyman~allocation that assigns subjects to treatment groups according to the ratio of the responses{\textquoteright}~standard deviations, should be used. As the standard deviations are unknown, an adaptive~design is often recommended. The asymptotic justification of this approach is arguable, since~it uses the normal approximation in tails where the error in the approximation is larger than~the estimated quantity. We consider two different approaches for optimality of designs that are~related to Pitman and Bahadur definitions of relative efficiency of tests. We prove that the~optimal allocation according to the Pitman criterion is the balanced allocation and that the~optimal allocation according to the Bahadur approach depends on the unknown parameters.~Exact calculations reveal that the optimal allocation according to Bahadur is often close to~the balanced design, and the powers of both are comparable to the Neyman allocation for~small sample sizes and are generally better for large experiments. Our findings have important~implications to the design of experiments, as the balanced design is proved to be optimal or~close to optimal and the need for the complications involved in following an adaptive design for~the purpose of increasing the power of tests is therefore questionable.},
url = {/files/dp568.pdf},
author = {Azriel, David and Mandel, Micha and Rinott, Yosef}
}
@booklet {christinafang-pddrapstsppd2011,
title = {Prior Divergence: Do Researchers and Participants Share the Same Prior Probability Distributions?},
journal = {Discussion Papers},
number = {587},
year = {2011},
month = {8},
publisher = {Cognitive Science 35 (2011) 744{\textendash}762},
abstract = {Do participants bring their own priors to an experiment? If so, do they share the same priors as the researchers who design the experiment? In this article, we examine the extent to which self-generated priors conform to experimenters{\textquoteright} expectations by explicitly asking participants to indicate their own priors in estimating the probability of a variety of events. We find in Study 1 that despite being instructed to follow a uniform distribution, participants appear to have used their own priors, which deviated from the given instructions. Using subjects{\textquoteright} own priors allows us to account better for their responses rather than merely to test the accuracy of their estimates. Implications for the study of judgment and decision making are discussed.},
url = {/files/dp587.pdf},
author = {Fang, Christina and Carp, Sari and Shapira, Zur}
}
@booklet {yosefrinott-pifagg2011,
title = {Probability Inequalities for a Gladiator Game},
journal = {Discussion Papers},
number = {571},
year = {2011},
month = {4},
abstract = {Based on a model introduced by Kaminsky, Luks, and Nelson (1984), we consider~a zero-sum allocation game called the Gladiator Game, where two teams of gladiators engage in a sequence of one-to-one fights in which the probability~of winning is a function of the gladiators{\textquoteright} strengths. Each team{\textquoteright}s strategy consists of the allocation of its total strength among its gladiators. We find the Nash equilibria of the game and compute its value. To do this, we study interesting majorization-type probability inequalities concerning linear combinations of Gamma random variables.},
url = {/files/dp571.pdf},
author = {Rinott, Yosef and Scarsini, Marco and Yu, Yaming}
}
@booklet {loewenstein-rlipbp2011,
title = {Reinforcement Learning in Professional Basketball Players},
journal = {Discussion Papers},
number = {593},
year = {2011},
month = {12},
publisher = {Published in Nature Communications 2:569.},
abstract = {Reinforcement learning in complex natural environments is a challenging task because the agent should generalize from the outcomes of actions taken in one state of the world to future actions in different states of the world. The extent to which human experts find the proper level of generalization is unclear. Here we show, using the sequences of field goal attempts made by professional basketball players, that the outcome of even a single field goal attempt has a considerable effect on the rate of subsequent 3 point shot attempts, in line with standard models of reinforcement learning. However, this change in behaviour is associated with negative correlations between the outcomes of successive field goal attempts. These results indicate that despite years of experience and high motivation, professional players overgeneralize from the outcomes of their most recent actions, which leads to decreased performance.},
url = {/files/dp593.pdf},
author = {Neiman, Tal and Loewenstein, Yonatan}
}
@booklet {cao-robacisfg2011,
title = {Remarks on Bargaining and Cooperation in Strategic Form Games},
journal = {Discussion Papers},
number = {565},
year = {2011},
month = {1},
abstract = {Although possessing many beautiful features, the Hart and Mas-Colell bargaining model is not flawless: the concept of threat in this model may behave quite counter-intuitive, and its SP equilibrium expected payoff vector may not be the same as the min-max solution payoff vector in zero-sum games. If we postpone realizations of all threats to the end of the game, the two problems can be solved simultaneously. This is exactly the 2(a) model suggested by Hart and Mas-Colell in the last section of their paper. I show that the new model, unfortunately, can only guarantee the existence of an SP equilibrium in the two player case. For the original model, I reduce the computation of an SP equilibrium to a system of linear inequalities. Quantitative efficiency and symmetric SP equilibria are also discussed.},
url = {/files/dp565.pdf},
author = {Cao, Zhigang}
}
@booklet {hassin-ubnudan2011,
title = {Unseen But Not Unsolved: Doing Arithmetic Non-Consciously},
journal = {Discussion Papers},
number = {576},
year = {2011},
month = {6},
abstract = {The modal view in the cognitive sciences holds that consciousness is necessary for abstract, symbolic and rule-following computations. Hence, mathematical thinking in general, and doing arithmetic more specifically, are widely believed to require consciousness. In the current paper we use continuous flash suppression to expose participants to extremely long-duration (up to 2000 milliseconds) subliminal arithmetic equations. The results of three experiments show that the equations were solved without ever reaching consciousness. In other words, they show that arithmetic can be done unconsciously. These findings imply that the modal view of the unconscious needs to be significantly updated, to include symbolic processes that were heretofore considered to be uniquely conscious.},
url = {/files/dp576.pdf},
author = {Sklar, Asael Y. and Hassin, Ran R.}
}
@booklet {mayabar-hillel-wstpogdhgbicr2011,
title = {We Sing the Praise of Good Displays: How Gamblers Bet in Casino Roulette},
journal = {Discussion Papers},
number = {585},
year = {2011},
month = {8},
publisher = {Bar-Hillel, M., \& Zultan, R. I. (2012). We Sing the Praise of Good Displays: How Gamblers Bet in Casino Roulette. CHANCE, 25(2), 27-30.},
abstract = {Gambling frequencies on single numbers in real casino roulette were displayed in a contour map. This resulted not only in a confirmation that gamblers are subject to middle bias, but also to accessibility effects. The figure allowed us to infer the location of the roulette wheel and croupier from the gambling data, as well as infer bounds on the dimensions of the roulette table.},
url = {/files/Roulette-585.pdf},
author = {Bar-Hillel, Maya and Zultan, Ro{\textquoteleft}i}
}
@booklet {hart-awaor2011,
title = {A Wealth-Requirement Axiomatization of Riskiness},
journal = {Discussion Papers},
number = {577},
year = {2011},
month = {6},
abstract = {We provide an axiomatic characterization of the measure of riskiness of gambles (risky assets) introduced by Foster and Hart (2009). The axioms are based on the concept of wealth requirement .},
url = {http://www.ma.huji.ac.il/hart/abs/risk-ax.html},
author = {Foster, Dean P. and Hart, Sergiu}
}
@booklet {hellman-acp2010,
title = {Almost Common Priors},
journal = {Discussion Papers},
number = {560},
year = {2010},
month = {9},
abstract = {What happens when priors are not common? We show that for each type profile $\tau$ over a knowledge space $(\Omega, \Pi)$, where the state space $\Omega$ is connected with respect to the partition profile $\Pi$, we can associate a value $0 \leq \varepsilon \leq 1$ that we term the prior distance of $\tau$},
url = {/files/dp560R.pdf},
author = {Hellman, Ziv}
}
@booklet {arieli-biacsbor2010,
title = {Backward Induction and Common Strong Belief of Rationality},
journal = {Discussion Papers},
number = {535},
year = {2010},
month = {2},
abstract = {In 1995, Aumann showed that in games of perfect information, common knowledge of rationality is consistent and entails the backward induction (BI) outcome. That work has been criticized because it uses "counterfactual" reasoning - what a player "would" do if he reached a node that he knows he will not reach, indeed that he himself has excluded by one of his own previous moves. This paper derives an epistemological characterization of BI that is outwardly reminiscent of Aumann{\textquoteright}s, but avoids counterfactual reasoning. Specifically, we say that a player strongly believes a proposition at a node of the game tree if he believes the proposition unless it is logically inconsistent with that node having been reached. We then show that common strong belief of rationality is consistent and entails the BI outcome, where - as with knowledge - the word "common" signifies strong belief, strong belief of strong belief, and so on ad infinitum. Our result is related to - though not easily derivable from - one obtained by Battigalli and Siniscalchi [7]. Their proof is, however, much deeper; it uses a full-blown semantic model of probabilities, and belief is defined as attribution of probability 1. However, we work with a syntactic model, defining belief directly by a sound and complete set of axioms, and the proof is relatively direct.},
url = {/files/dp535.pdf},
author = {Arieli, Itai}
}
@booklet {nogaalon-bi2010,
title = {Bayesian Ignorance},
journal = {Discussion Papers},
number = {538},
year = {2010},
month = {2},
abstract = {We quantify the effect of Bayesian ignorance by comparing the social cost obtained in a Bayesian game by agents with local views to the expected social cost of agents having global views. Both benevolent agents, whose goal is to minimize the social cost, and selfish agents, aiming at minimizing their own individual costs, are considered. When dealing with selfish agents, we consider both best and worst equilibria outcomes. While our model is general, most of our results concern the setting of network cost sharing (NCS) games. We provide tight asymptotic results on the effect of Bayesian ignorance in directed and undirected NCS games with benevolent and selfish agents. Among our findings we expose the counter-intuitive phenomenon that "ignorance is bliss": Bayesian ignorance may substantially improve the social cost of selfish agents. We also prove that public random bits can replace the knowledge of the common prior in attempt to bound the effect of Bayesian ignorance in settings with benevolent agents. Together, our work initiates the study of the effects of local vs. global views on the social cost of agents in Bayesian contexts.},
url = {/files/dp538.pdf},
author = {Alon, Noga and Emek, Yuval and Feldman, Michal and Tennenholtz, Moshe}
}
@booklet {rinott-biameoqifp2010,
title = {Best Invariant and Minimax Estimation of Quantiles in Finite Populations},
journal = {Discussion Papers},
number = {553},
year = {2010},
month = {5},
publisher = {Journal of Statistical Planning and Inference 141, 2633{\textendash}2644 (2011)},
abstract = {We study estimation of finite population quantiles, with emphasis on estimators that are invariant under monotone transformations of the data, and suitable invariant loss functions. We discuss non-randomized and randomized estimators, best invariant and minimax estimators and sampling strategies relative to different classes. The combination of natural invariance of the kind discussed here, and finite population sampling appears to be novel, and leads to interesting statistical and combinatorial aspects.},
url = {/files/dp553.pdf},
author = {Malinovsky, Yaakov and Rinott, Yosef}
}
@booklet {bar-hillel-acomrtuamotcb2010,
title = {A Commentary on Mel Rutherford{\textquoteright}s {\textquoteleft}On the Use and Misuse of the "Two Children" Brainteaser{\textquoteright}},
journal = {Discussion Papers},
number = {551},
year = {2010},
month = {5},
publisher = {Pragmatics and Cognition 18 (2010)},
abstract = {Rutherford (2010) criticizes the way some people have analyzed the 2-children problem, claiming (correctly) that slight nuances in the problem{\textquoteright}s formulation can change the correct answer. However, his own data demonstrate that even when there is a unique correct answer, participants give intuitive answers that differ from it systematically {\textendash} replicating the data reported by those he criticizes. Thus, his critique reduces to an admonition to use care in formulating and analyzing this brainteaser {\textendash} which is always a good idea {\textendash} but contributes little to what is known, analytically or empirically, about the 2-children problem.},
url = {/files/dp551.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {hart-crbaar2010,
title = {Comparing Risks by Acceptance and Rejection},
journal = {Discussion Papers},
number = {531},
year = {2010},
month = {2},
abstract = {Stochastic dominance is a partial order on risky assets ("gambles") that is based on the uniform preference, of all decision-makers (in an appropriate class), for one gamble over another. We modify this, first, by taking into account the status quo (given by the current wealth) and the possibility of rejecting gambles, and second, by comparing rejections that are substantive (that is, uniform over wealth levels or over utilities). This yields two new stochastic orders: wealth-uniform dominance and utility-uniform dominance. Unlike stochastic dominance, these two orders are complete: any two gambles can be compared. Moreover, they are equivalent to the orders induced by, respectively, the Aumann-Serrano (2008) index of riskiness and the Foster-Hart (2009a) measure of riskiness.},
url = {http://www.ma.huji.ac.il/hart/abs/risk-u.html},
author = {Hart, Sergiu}
}
@booklet {babichenko-cudane2010,
title = {Completely Uncoupled Dynamics and Nash Equilibria},
journal = {Discussion Papers},
number = {529},
year = {2010},
month = {1},
abstract = {A completely uncoupled dynamic is a repeated play of a game, where each period every player knows only his action set and the history of his own past actions and payoffs. One main result is that there exist no completely uncoupled dynamics with finite memory that lead to pure Nash equilibria (PNE) in almost all games possessing pure Nash equilibria. By "leading to PNE" we mean that the frequency of time periods at which some PNE is played converges to 1 almost surely. Another main result is that this is not the case when PNE is replaced by "Nash epsilon-equilibria": we exhibit a completely uncoupled dynamic with finite memory such that from some time on a Nash epsilon-equilibrium is played almost surely.},
url = {/files/dp529.pdf},
author = {Babichenko, Yakov}
}
@booklet {halbersberg-otdonipftvc2010,
title = {On the Deduction of National Insurance Payments from Tort Victims{\textquoteright} Claims},
journal = {Discussion Papers},
number = {564},
year = {2010},
month = {11},
publisher = {3 Mishpatim Online 1 (2010)},
abstract = {In CA 1093/07 Bachar v. Fokmann [2009] (request for additional hearing denied, 2010) , the Israeli Supreme Court has formed~a formula for calculating the deduction of NII payments from a tort victim{\textquoteright}s claim, when only some of the victim{\textquoteright}s impairment is causally~linked to the tortious act in question. Overall, six Supreme Court Justices have reviewed and affirmed this simple formula.~However, this formula is incorrect, as it contradicts some of the most basic tort premises, ignores the way impairment is calculated, and~necessarily leads to the under-compensation of the victim, and to an unjust enrichment of either the tortfeasor, the National Insurance~Institute, or both. This Article, therefore, calls for the adoption of a different formula~that is both legally and arithmetically correct.},
url = {/files/dp564.pdf},
author = {Halbersberg, Yoed}
}
@booklet {kareev-dcipp2010,
title = {Detecting Change In Partner{\textquoteright}s Preferences},
journal = {Discussion Papers},
number = {557},
year = {2010},
month = {7},
abstract = {Studies of the detection of change have commonly been concerned with individuals inspecting a system or a process, whose characteristics were fully determined by the researcher. We, instead, study the detection of change in the preferences - and hence the behavior - of others with whom an individual interacts. More specifically, we study situations in which one{\textquoteright}s benefits are the result of the joint actions of one and one{\textquoteright}s partner when at times the preferred combination is the same for both and at times it is not. In other words, what we change is the payoffs associated with the different combinations of interactive choices and then look at choice behavior following such a change. We find that players are extremely quick to respond to a change in the preferences of their counterparts. This responsiveness can be explained by the players{\textquoteright} impulsive reaction to regret - if one was due - at their most recent decision.},
url = {/files/dp557.pdf},
author = {Avrahami, Judith and Kareev, Yaakov}
}
@booklet {linial1-tdors2010,
title = {The Dynamics of Reputation Systems},
journal = {Discussion Papers},
number = {563},
year = {2010},
month = {11},
abstract = {Online reputation systems collect, maintain and disseminate reputations as a summary numerical score of past interactions of an establishment with its users. As reputation systems, including web search engines, gain in popularity and become a common method for people to select sought services, a dynamical system unfolds: Experts{\textquoteright} reputation attracts the potential customers. The experts{\textquoteright} expertise affects the probability of satisfying the customers. This rate of success in turn influences the experts{\textquoteright} reputation. We consider here several models where each expert has innate, constant, but unknown level of expertise and a publicly known, dynamically varying, reputation. The specific},
url = {/files/dp563.pdf},
author = {Ban, Amir and Linial, Nati}
}
@booklet {edithcohen-ema2010,
title = {Envy-Free Makespan Approximation},
journal = {Discussion Papers},
number = {539},
year = {2010},
month = {2},
abstract = {We study envy-free mechanisms for scheduling tasks on unrelated machines (agents) that approximately minimize the makespan. For indivisible tasks, we put forward an envy-free poly-time mechanism that approximates the minimal makespan to within a factor of O(logm), where m is the number of machines. We also show a lower bound of Omega(log m/log logm). This improves the recent result of Mu{\textquoteright}alem [22] who give an upper bound of (m + 1)/2, and a lower bound of 2 - 1/m. For divisible tasks, we show that there always exists an envy-free poly-time mechanism with optimal makespan. Finally, we demonstrate how our mechanism for envy free makespan minimization can be interpreted as a market clearing problem.},
url = {/files/dp539.pdf},
author = {Edith Cohen and Michal Feldman and Amos Fiat and Haim Kaplan and Svetlana Olonetsky}
}
@booklet {samuel-cahn-tfpoaoadc2010,
title = {Fighter Problem: Optimal Allocation of a Discrete Commodity, The},
journal = {Discussion Papers},
number = {558},
year = {2010},
month = {7},
publisher = {Advances in Applied Probability, (2011), Vol. 43, 121-130.},
abstract = {The Fighter problem with discrete ammunition is studied. An aircraft (fighter) equipped with n anti-aircraft missiles is intercepted by enemy airplanes, the appearance of which follows a homogeneous Poisson process with known intensity. If j of the n missiles are spent at an encounter they destroy an enemy plane with probability a(j), where a(0)=0 and a(j) is a known, strictly increasing concave sequence, e.g., $a(j) = 1 - q^j$, $0 < q < 1$. If the enemy is not destroyed, the enemy shoots the fighter down with known probability 1 - u, where $0 \leq u \leq 1$. The goal of the fighter is to shoot down as many enemy airplanes as possible during a given time period [0,T ]. Let K(n, t) be an optimal number of missiles to be used at a present encounter, when the fighter has flying time t remaining and n missiles remaining. Three seemingly obvious properties of K(n, t) have been conjectured: [A] The closer to the destination, the more of the n missiles one should use, [B] the more missiles one has, the more one should use, and [C] the more missiles one has, the more one should save for possible future encounters. We show that [C] holds for all $0 \leq u \leq 1$, that [A] and [B] hold for the "Invincible Fighter" (u = 1), and that [A] holds but [B] fails for the "Frail Fighter" (u = 0).},
url = {/files/dp558.pdf},
author = {Bartroff, Jay and Samuel-Cahn, Ester}
}
@booklet {samet-hcacp2010,
title = {How Common Are Common Priors?},
journal = {Discussion Papers},
number = {532},
year = {2010},
month = {2},
publisher = {Forthcoming in Games and Economic Behavior},
abstract = {To answer the question in the title we vary agents{\textquoteright} beliefs against the background of a fixed knowledge space, that is, a state space with a partition for each agent. Beliefs are the posterior probabilities of agents, which we call type profiles. We then ask what is the topological size of the set of consistent type profiles, those that are derived from a common prior (or a common improper prior in the case of an infinite state space). The answer depends on what we term the tightness of the partition profile. A partition profile is tight if in some state it is common knowledge that any increase of any single agent{\textquoteright}s knowledge results in an increase in common knowledge. We show that for partition profiles which are tight the set of consistent type profiles is topologically large, while for partition profiles which are not tight this set is topologically small.},
url = {/files/dp532.pdf},
author = {Hellman, Ziv and Samet, Dov}
}
@booklet {babichenko-hltpe2010,
title = {How Long to Pareto Efficiency?},
journal = {Discussion Papers},
number = {562},
year = {2010},
month = {10},
abstract = {We consider uncoupled dynamics (i.e., dynamics where each player knows only his own payoff function) that reach Pareto efficient and individually rational outcomes. We prove that the number of periods it takes is in the worst case exponential in the number of players.},
url = {/files/dp562.pdf},
author = {Yakov Babichenko}
}
@booklet {bezalelpeleg-otioioia2010,
title = {On the Impact of Independence of Irrelevant Alternatives},
journal = {Discussion Papers},
number = {561},
year = {2010},
month = {10},
publisher = {SERIEs (2012) 3:143-156 : "On the Impact of Independence of Irrelevant Alternatives: the Case of Two-Person NTU Games"},
abstract = {On several classes of n-person NTU games that have at least one Shapley NTU value, Aumann characterized this solution by six axioms: Non-emptiness, efficiency, unanimity, scale covariance, conditional additivity, and independence of irrelevant alternatives (IIA). Each of the first five axioms is logically independent of the remaining axioms, and the logical independence of IIA is an open problem. We show that for n = 2 the first five axioms already characterize the Shapley NTU value, provided that the class of games is not further restricted. Moreover, we present an example of a solution that satisffies the first 5 axioms and violates IIA for 2-person NTU games (N;V) with uniformly p-smooth V(N).},
url = {/files/dp561.pdf},
author = {Bezalel Peleg and Peter Sudh{\"o}lter and Jos{\'e} M. Zarzuelo}
}
@booklet {peretz-lcltfa2010,
title = {Learning Cycle Length Through Finite Automata},
journal = {Discussion Papers},
number = {546},
year = {2010},
month = {4},
abstract = {We study the space-and-time automaton-complexity of the CYCLE-LENGTH problem. The input is a periodic stream of bits whose cycle length is bounded by a known number n. The output, a number between 1 and n, is the exact cycle length. We also study a related problem, CYCLE-DIVISOR. In the latter problem the output is a large number that divides the cycle length, that is, a number k >> 1 that divides the cycle length, or (in case the cycle length is small) the cycle length itself. The complexity is measured in terms of the SPACE, the logarithm of the number of states in an automaton that solves the problem, and the TIME required to reach a terminal state. We analyze the worst input against a deterministic (pure) automaton, and against a probabilistic (mixed) automaton. In the probabilistic case we require that the probability of computing a correct output is arbitrarily close to one.We establish the following results: o CYCLE-DIVISOR can be solved in deterministic SPACE o(n), and TIME O(n). o CYCLE-LENGTH cannot be solved in deterministic SPACE X TIME smaller than (n^2). o CYCLE-LENGTH can be solved in probabilistic SPACE o(n), and TIME O(n). o CYCLE-LENGTH can be solved in deterministic SPACE O(nL), and TIME O(n/L), for any positive L < 1.},
url = {/files/dp546.pdf},
author = {Ron Peretz}
}
@booklet {halbersberg-lsfmtacfanp2010,
title = {Liability Standards for Multiple-Victim Torts: A Call for a New Paradigm},
journal = {Discussion Papers},
number = {533},
year = {2010},
month = {2},
abstract = {Under the conventional approach in torts, liability for an accident is decided by comparing the injurer{\textquoteright}s costs of precautions with those of the victim, and, under the negligence rule, also with the expected magnitude of harm. In multiplevictim cases, the current paradigm holds that courts should determine liability by comparing the injurer{\textquoteright}s costs of precautions with the victims{\textquoteright} aggregate costs and with their aggregate harm. This aggregative risk-utility test supposedly results in the imposition of liability on the least-cost avoiders of the accident, and, therefore, is assumed efficient. However, this paradigm neglects the importance of the normal differences between tort victims. When victims are heterogeneous with regard to their expected harm or costs of precaution, basing the liability-decision on the aggregate amounts may be incorrect, causing in some cases over-deterrence, while in other, under-deterrence and dilution of liability. A new paradigm is therefore needed. This Article demonstrates how aggregate liability may violate aggregate efficiency, and concludes that decisions based upon aggregate amounts are inappropriate when the victims are heterogeneous-as they typically are in real life. The Article then turns to an exploration of an alternative to the aggregative risk-utility test, and argues for a legal rule that would combine restitution for precaution costs, plus an added small "bonus," with the sampling of victims{\textquoteright} claims.},
url = {/files/dp533.pdf},
author = {Yoed Halbersberg}
}
@booklet {sheshinski-loic2010,
title = {Limits on Individual Choice},
journal = {Discussion Papers},
number = {554},
year = {2010},
month = {6},
abstract = {Individuals behave with choice probabilities defined by a multinomial logit (MNL) probability distribution over a finite number of alternatives which includes utilities as parameters. The salient feature of the model is that probabilities depend on the choice-set, or domain. Expanding the choice-set decreases the probabilities of alternatives included in the original set, providing positive probabilities to the added alternatives. The wider probability {\textquoteright}spread{\textquoteright} causes some individuals to further deviate from their higher valued alternatives, while others find the added alternatives highly valuable. For a population with diverse preferences, there exists a subset of alternatives, called the optimum choice-set, which balances these considerations to maximize social welfare. The paper analyses the dependence of the optimum choice-set on a parameter which specifies the precision of individuals{\textquoteright} choice ({\textquoteright}degree of rationality{\textquoteright}). It is proved that for high values of this parameter the optimum choice-set includes all alternatives, while for low values it is a singleton. Numerical examples demonstrate that for intermediate values, the size and possible nesting of the optimum choice-sets is complex. Governments have various means (defaults, tax/subsidy) to directly affect choice probabilities. This is modelled by {\textquoteright}probability weight{\textquoteright} parameters. The paper analyses the structure of the optimum weights, focusing on the possible exclusion of alternatives. A binary example explores the level of {\textquoteright}type one{\textquoteright} and {\textquoteright}type two{\textquoteright} errors which justify the imposition of early eligibility for retirement benefits, common to social security systems.
Finally, the effects of heterogeneous degrees of rationality among individuals are briefly discussed.},
url = {/files/dp554.pdf},
author = {Eytan Sheshinski}
}
@booklet {bar-hillel-mb2010,
title = {Maya Bar-Hillel},
journal = {Discussion Papers},
number = {548},
year = {2010},
month = {5},
publisher = {Odyssey 8 (2010)},
abstract = {Scientists try to find out the truth about our world. Judges in a court of law try to find out the truth about the target events in the indictment. What are the similarities, and what are the differences, in the procedures that govern the search for truth in these two systems? In particular, why are quantitative tools the hallmark of science, whereas in courts they are rarely used, and when used, are prone to error? (In Hebrew)},
url = {/files/dp548.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {harel-nv2010,
title = {Non-Consequentialist Voting},
journal = {Discussion Papers},
number = {545},
year = {2010},
month = {4},
abstract = {Standard theory assumes that voters{\textquoteright} preferences over actions (voting) are induced by their preferences over electoral outcomes (policies, candidates). But voters may also have non-consequentialist (NC) motivations: they may care about how they vote even if it does not affect the outcome. When the likelihood of being pivotal is small, NC motivations can dominate voting behavior. To examine the prevalence of NC motivations, we design an experiment that exogenously varies the probability of being pivotal yet holds constant other features of the decision environment. We find a significant effect, consistent with at least 12.5\% of subjects being motivated by NC concerns.},
url = {/files/dp545.pdf},
author = {Shayo, Moses and Harel, Alon}
}
@booklet {jaybartroff-ooaoacruaiaatp2010,
title = {On Optimal Allocation of a Continuous Resource Using an Iterative Approach and Total Positivity},
journal = {Discussion Papers},
number = {530},
year = {2010},
month = {1},
publisher = {Advances in Applied Probability, (2010) Vol. 42, Pages 795-815.},
abstract = {We study a class of optimal allocation problems, including the well-known Bomber Problem, with the following common probabilistic structure. An aircraft equipped with an amount x of ammunition is intercepted by enemy airplanes arriving according to a homogenous Poisson process over a fixed time duration t. Upon encountering an enemy, the aircraft has the choice of spending any amount 0},
url = {/files/dp530.pdf},
author = {Jay Bartroff and Larry Goldstein and Yosef Rinott and Ester Samuel-Cahn}
}
@booklet {moldovanu-oslai2010,
title = {Optimal Search, Learning and Implementation},
journal = {Discussion Papers},
number = {543},
year = {2010},
month = {4},
abstract = {We characterize the incentive compatible, constrained efficient policy ("second-best") in a dynamic matching environment, where impatient, privately informed agents arrive over time, and where the designer gradually learns about the distribution of agents{\textquoteright} values. We also derive conditions on the learning process ensuring that the complete-information, dynamically efficient allocation of resources ("first-best") is incentive compatible. Our analysis reveals and exploits close, formal relations between the problem of ensuring implementable allocation rules in our dynamic allocation problems with incomplete information and learning, and between the classical problem, posed by Rothschild [19], of finding optimal stopping policies for search that are characterized by a reservation price property .},
url = {/files/dp543.pdf},
author = {Gershkov, Alex and Moldovanu, Benny}
}
@booklet {marcofrancesconi-otootf2010,
title = {On the Origin of the Family},
journal = {Discussion Papers},
number = {534},
year = {2010},
month = {2},
abstract = {This paper presents an overlapping generations model to explain why humans live in families rather than in other pair groupings. Since most non-human species are not familial, something special must be behind the family. It is shown that the two necessary features that explain the origin of the family are given by uncertain paternity and overlapping cohorts of dependent children. With such two features built into our model, and under the assumption that individuals care only for the propagation of their own genes, our analysis indicates that fidelity families dominate promiscuous pair bonding, in the sense that they can achieve greater survivorship and enhanced genetic fitness. The explanation lies in the free riding behavior that characterizes the interactions between competing fathers in the same promiscuous pair grouping. Kin ties could also be related to the emergence of the family. When we consider a kinship system in which an adult male transfers resources not just to his offspring but also to his younger siblings, we find that kin ties never emerge as an equilibrium outcome in a promiscuous environment. In a fidelity family environment, instead, kinship can occur in equilibrium and, when it does, it is efficiency enhancing in terms of greater survivorship and fitness. The model can also be used to shed light on the issue as to why virtually all major world religions are centered around the importance of the family.},
url = {/files/dp534.pdf},
author = {Marco Francesconi and Christian Ghiglino and Motty Perry}
}
@booklet {aumann-arrtmotmwtw2010,
title = {A Response Regarding the Matter of the Man with Three Wives},
journal = {Discussion Papers},
number = {537},
year = {2010},
month = {2},
publisher = {Hama{\textquoteright}yan 50 (2010), 1-11.},
abstract = {A response to criticism of the paper "On the Matter of the Man with Three Wives," Moriah 22 (1999), 98- 107 (see also Rationality Centerdp 102, June 1996). The Moriah paper is a non-mathematical account, written in Hebrew for the Rabbinic public, of "Game-Theoretic Analysis of a Bankruptcy Problem from the Talmud," by R. Aumann and M. Maschler, J. Econ. Th. 36 (1985), 195- 213. The current response appeared in Hama{\textquoteright}yan 50 (2010), 1- 11.},
url = {/files/dp537.pdf},
author = {Robert J. Aumann}
}
@booklet {denizdizdar-rmitdkp2010,
title = {Revenue Maximization in the Dynamic Knapsack Problem},
journal = {Discussion Papers},
number = {544},
year = {2010},
month = {4},
abstract = {We analyze maximization of revenue in the dynamic and stochastic knapsack problem where a given capacity needs to be allocated by a given deadline to sequentially arriving agents. Each agent is described by a two-dimensional type that reflects his capacity requirement and his willingness to pay per unit of capacity. Types are private information. We first characterize implementable policies. Then we solve the revenue maximization problem for the special case where there is private information about per-unit values, but capacity needs are observable. After that we derive two sets of additional conditions on the joint distribution of values and weights under which the revenue maximizing policy for the case with observable weights is implementable, and thus optimal also for the case with two-dimensional private information. In particular, we investigate the role of concave continuation revenues for implementation. We also construct a simple policy for which per-unit prices vary with requested weight but not with time, and prove that it is asymptotically revenue maximizing when available capacity/ time to the deadline both go to infinity. This highlights the importance of nonlinear as opposed to dynamic pricing.},
url = {/files/dp544.pdf},
author = {Deniz Dizdar and Alex Gershkov and Benny Moldovanu}
}
@booklet {kareev-troiisd2010,
title = {Role of Impulses in Shaping Decisions, The},
journal = {Discussion Papers},
number = {552},
year = {2010},
month = {5},
publisher = {Journal of Behavioral Decision Making (forthcoming)},
abstract = {This article explores the extent to which decision behavior is shaped by short-lived reactions to the outcome of the most recent decision. We inspected repeated decision-making behavior in two versions of each of two decision-making tasks, an individual task and a strategic one. By regressing behavior onto the outcomes of recent decisions, we found that the upcoming decision was well predicted by the most recent outcome alone, with the tendency to repeat a previous action being affected both by its actual outcome and by the outcomes of actions not taken. Because the goodness of predictions based on the most recent outcome did not diminish as participants gained experience with the task, we conclude that repeated decisions are continuously affected by impulsive reactions.},
url = {/files/dp552.pdf},
author = {Avrahami, Judith and Kareev, Yaakov}
}
@booklet {aumann-troiitwfc2010,
title = {Role of Incentives in the World Financial Crisis, The},
journal = {Discussion Papers},
number = {536},
year = {2010},
month = {2},
abstract = {A lecture explaining the causes of the 2008 9 world financial crisis in terms of ordinary economic processes. The lecture was delivered at the 39th St. Gallen Symposium, University of St. Gallen, Switzerland, 8 May 2009.},
url = {/files/dp536.pdf},
author = {Robert J. Aumann}
}
@booklet {mayabar-hillel-arbaonaspopap2010,
title = {A Rose by Any Other Name: A Social-Cognitive Perspective on Poets and Poetry},
journal = {Discussion Papers},
number = {549},
year = {2010},
month = {5},
publisher = {Judgment and Decision Making, Vol. 7, No. 2, March 2012, Pp. 149{\textendash}164},
abstract = {Evidence, anecdotal and scientific, suggests that people treat (or are affected by) products of prestigious sources differently than those of less prestigious, or of anonymous, sources. The products which are the focus of the present study are poems, and the sources are the poets. We explore the manner in which the poet s name affects the experience of reading a poem. Study 1 establishes the effect we wish to address: a poet s reputation enhances the evaluation of a poem. Study 2 asks whether it is only the reported evaluation of the poem that is enhanced by the poet s name (as was the case for The Emperor s New Clothes) or the enhancement is genuine and unaware. Finding for the latter, Study 3 explores whether the poet s name changes the reader s experience of it, so that in a sense one is reading a different poem. We conclude that it is not so much that the attributed poem really differs from the unattributed poem, as that it is just ineffably better. The name of a highly regarded poet seems to prime quality, and the poem becomes somehow better. This is a more subtle bias than the deliberate one rejected in Study 2, but it is a bias nonetheless. Ethical implications of this kind of effect are discussed.},
url = {/files/dp549.pdf},
author = {Maya Bar-Hillel and Alon Maharshak and Avital Moshinsky and Ruth Nofech}
}
@booklet {larrygoldstein-scosstfsmce2010,
title = {Stochastic Comparisons of Stratified Sampling Techniques for Some Monte Carlo Estimators},
journal = {Discussion Papers},
number = {556},
year = {2010},
month = {7},
publisher = {Bernoulli 17, 592-608. (2011)},
abstract = {We compare estimators of the (essential) supremum and the integral of a function f defined on a measurable space when f may be observed at a sample of points in its domain, possibly with error. The estimators compared vary in their levels of stratification of the domain, with the result that more refined stratification is better with respect to different criteria. The emphasis is on criteria related to stochastic orders. For example, rather than compare estimators of the integral of f by their variances (for unbiased estimators), or mean square error, we attempt the stronger comparison of convex order when possible. For the supremum the criterion is based on the stochastic order of estimators. For some of the results no regularity assumptions for f are needed, while for others we assume that f is monotone on an appropriate domain.},
url = {/files/dp556.pdf},
author = {Larry Goldstein and Yosef Rinott and Marco Scarsini}
}
@booklet {nogaalon-samflon2010,
title = {Strategyproof Approximation Mechanisms for Location on Networks},
journal = {Discussion Papers},
number = {541},
year = {2010},
month = {2},
abstract = {We consider the problem of locating a facility on a network, represented by a graph. A set of strategic agents have different ideal locations for the facility; the cost of an agent is the distance between its ideal location and the facility. A mechanism maps the locations reported by the agents to the location of the facility. Specifically, we are interested in social choice mechanisms that do not utilize payments. We wish to design mechanisms that are strategyproof, in the sense that agents can never benefit by lying, or, even better, group strategyproof, in the sense that a coalition of agents cannot all benefit by lying. At the same time, our mechanisms must provide a small approximation ratio with respect to one of two optimization targets: the social cost or the maximum cost.We give an almost complete characterization of the feasible truthful approximation ratio under both target functions, deterministic and randomized mechanisms, and with respect to different network topologies. Our main results are: We show that a simple randomized mechanism is group strategyproof and gives a tight approximation ratio of 3/2 for the maximum cost when the network is a circle; and weshow that no randomized SP mechanism can provide an approximation ratio better than 2-o(1) to the maximum cost even when the network is a tree, thereby matching a trivial upper bound of two.},
url = {/files/dp541.pdf},
author = {Noga Alon and Michal Feldman and Ariel D. Procaccia and Moshe Tennenholtz}
}
@booklet {davidazriel-ttvedids2010,
title = {Treatment Versus Experimentation Dilemma in Dose-Finding Studies, The},
journal = {Discussion Papers},
number = {559},
year = {2010},
month = {9},
publisher = {Journal of Statistical Planning and Inference 141, 2759{\textendash}2768. (2011).},
abstract = {Phase I clinical trials are conducted in order to find the maximum tolerated dose (MTD) of a given drug from a finite set of doses. For ethical reasons, these studies are usually sequential, treating patients or group of patients with the best available dose according to the current knowledge. However, it is proved here that such designs, and, more generally, designs that concentrate on one dose from some time on, cannot provide consistent estimators for the MTD unless very strong parametric assumptions hold. We describe a family of sequential designs that treat individuals with one of the two closest doses to the estimated MTD, and prove that such designs, under general conditions, concentrate eventually on the two closest doses to the MTD and estimate the MTD consistently. It is shown that this family contains randomized designs that assign the MTD with probability that approaches 1 as the size of the experiment goes to infinity. We compare several designs by simulations, studying their performances in terms of correct estimation of the MTD and the proportion of individuals treated with the MTD.},
url = {/files/dp559.pdf},
author = {David Azriel and Micha Mandel and Yosef Rinott}
}
@booklet {edithcohen-taeicag2010,
title = {Truth and Envy in Capacitated Allocation Games},
journal = {Discussion Papers},
number = {540},
year = {2010},
month = {2},
abstract = {We study auctions with additive valuations where agents have a limit on the number of items they may receive. We refer to this setting as capacitated allocation games. We seek truthful and envy free mechanisms that maximize the social welfare. I.e., where agents have no incentive to lie and no agent seeks to exchange outcomes with another.In 1983, Leonard showed that VCG with Clarke Pivot payments (which is known to be truthful, individually rational, and have no positive transfers), is also an envy free mechanism for the special case of n items and n unit capacity agents. We elaborate upon this problem and show that VCG with Clarke Pivot payments is envy free if agent capacities are all equal. When agent capacities are not identical, we show that there is no truthful and envy free mechanism that maximizes social welfare if one disallows positive transfers.For the case of two agents (and arbitrary capacities) we show a VCG mechanism that is truthful, envy free, and individually rational, but has positive transfers. We conclude with a host of open problems that arise from our work.},
url = {/files/dp540.pdf},
author = {Edith Cohen and Michal Feldman and Amos Fiat and Haim Kaplan and Svetlana Olonetsky}
}
@booklet {lev-atporm2010,
title = {A Two-Dimensional Problem of Revenue Maximization},
journal = {Discussion Papers},
number = {542},
year = {2010},
month = {4},
abstract = {We consider the problem of finding the mechanism that maximizes the revenue of a seller of multiple objects. This problem turns out to be significantly more complex than the case where there is only a single object (which was solved by Myerson [5]). The analysis is difficult even in the simplest case studied here, where there are two exclusive objects and a single buyer, with valuations uniformly distributed on triangular domains. We show that the optimal mechanisms are piecewise linear with either 2 or 3 pieces, and obtain explicit formulas for most cases of interest},
url = {/files/dp542.pdf},
author = {Omer Lev}
}
@booklet {noambar-shai-tuonibbift2010,
title = {Use of Numerical Information by Bees in Foraging Tasks, The},
journal = {Discussion Papers},
number = {555},
year = {2010},
month = {6},
abstract = {The ability of invertebrates to perform complex cognitive tasks is widely debated. Bees utilize the number of landmarks en-route to their destination as cues for navigation, but their use of numerical information in other contexts has not been studied. Numerical regularity in the spatial distribution of food occurs naturally in some flowers, which contain a fixed number of nectaries. Bees that collect nectar from such flowers are expected to increase their foraging efficiency by avoiding return visits to empty nectaries. This can be achieved if bees base their flowerdeparture decisions on the number of nectaries they had already visited, or on other sources of information that co-vary with this number.We tested, through field observations and laboratory experiments, whether bees adapt their departure behavior to the number of available food resources. Videorecorded observations of bumblebees that visited Alcea setosa flowers with five nectaries revealed that the conditional probability of flower departure after five probings was 93\%. Visit duration, the spatial attributes of the flowers and scent marks could be excluded as flower-leaving cues, while the volume of nectar collected may have guided part of the departure decisions. In the laboratory the bees foraged on two patches, each with three computer-controlled feeders, but could receive only up to two sucrose-solution rewards in each patch visit. The foragers gradually increased their tendency to leave the patches after the second reward, while the frequency of patch departure after the first reward remained constant. Patch-visit duration, nectar volume, scent marks and recurring visit sequences in a patch were ruled out as possible sources of patch-leaving information.We conclude that bumblebees distinguish among otherwise identical stimuli by their serial position in a sequence, and use this capability to increase foraging efficiency. 
Our findings support an adaptive role for a complicated cognitive skill in a seemingly small and simple invertebrate.},
url = {/files/dp555.pdf},
author = {Noam Bar-Shai and Tamar Keasar and Avi Shmida}
}
@booklet {roizultan-wbwibtfw2010,
title = {When Being Wasteful Is Better Than Feeling Wasteful},
journal = {Discussion Papers},
number = {550},
year = {2010},
month = {5},
publisher = {Judgment and Decision Making, 5( 7), 489-496},
abstract = {"Waste not want not" expresses our culture{\textquoteright}s aversion to waste. "I could have gotten the same thing for less" is a sentiment that can diminish pleasure in a transaction. We study people{\textquoteright}s willingness to "pay" to avoid this spoiler. In one scenario, participants imagined they were looking for a rental apartment, and had bought a subscription to an apartment listing. If a cheaper subscription had been declined, respondents preferred not to discover post hoc that it would have sufficed. Specifically, they preferred ending their quest for the ideal apartment after seeing more, rather than fewer, apartments. Other scenarios produced similar results. We conclude that people may sometimes prefer to be wasteful in order to avoid feeling wasteful.},
url = {/files/dp550.pdf},
author = {Ro{\textquoteright}i Zultan and Maya Bar-Hillel and Nitsan Guy}
}
@booklet {hellman-bsscircg2009,
title = {Bargaining Set Solution Concepts in Repeated Cooperative Games},
journal = {Discussion Papers},
number = {523},
year = {2009},
month = {10},
abstract = {This paper is concerned with the question of extending the definition of the bargaining set, a cooperative game solution, when cooperation takes place in a repeated setting. The focus is on situations in which the players face (finite or infinite) sequences of exogenously specified TU-games and receive sequences of imputations against those static cooperative games in each time period. Two alternative definitions of what a {\textquoteleft}sequence of coalitions{\textquoteright} means in such a context are considered, in respect to which the concept of a repeated game bargaining set may be defined, and existence and non-existence results are studied. A solution concept we term subgame-perfect bargaining set sequences is also defined, and sufficient conditions are given for the nonemptiness of subgame-perfect solutions in the case of a finite number of time periods.},
url = {/files/dp523.pdf},
author = {Ziv Hellman}
}
@booklet {zamir-obestcjttdc2009,
title = {On Bayesian-Nash Equilibria Satisfying the Condorcet Jury Theorem: The Dependent Case},
journal = {Discussion Papers},
number = {527},
year = {2009},
month = {12},
publisher = {Forthcoming in Social Choice and Welfare},
abstract = {We investigate sufficient conditions for the existence of Bayesian-Nash equilibria that satisfy the Condorcet Jury Theorem (CJT). In the Bayesian game Gn among n jurors, we allow for arbitrary distribution on the types of jurors. In particular, any kind of dependency is possible. If each juror i has a constant strategy, si (that is, a strategy that is independent of the size n of the jury), such that s=( s1, s2, . . . , sn . . .) satisfies the CJT, then by McLennan (1998) there exists a Bayesian-Nash equilibrium that also satisfies the CJT. We translate the CJT condition on sequences of constant strategies into the following problem: (**) For a given sequence of binary random variables X = (X1, X2, ..., Xn, ...) with joint distribution P, does the distribution P satisfy the asymptotic part of the CJT? We provide sufficient conditions and two general (distinct) necessary conditions for (**). We give a complete solution to this problem when X is a sequence of exchangeable binary random variables.},
url = {/files/db527.pdf},
author = {Peleg, Bezalel and Zamir, Shmuel}
}
@booklet {schuster-bdtptcwpipauei2009,
title = {Bottlenose Dolphins (Tursiops Truncatus) Prefer to Cooperate When Petted: Integrating Proximate and Ultimate Explanations II},
journal = {Discussion Papers},
number = {508},
year = {2009},
month = {3},
publisher = {Journal of Comparative Psychology 123 (1) (2009), 45--55.},
abstract = {Cooperation poses theoretical problems because the behaviors of individuals can benefit others. Evolutionary and game-theory explanations that focus on maximizing one{\textquoteright}s own material outcomes are usually supported by experimental models with isolated and anonymous subjects. Cooperation in the natural world, however, is often a social act whereby familiar individuals coordinate behaviors for shared outcomes. Social cooperation is also associated with a cooperation bias expressed as a preference for cooperation even when noncooperation is immediately more beneficial. The authors report on evidence for such a bias in a captive group of bottlenose dolphins that voluntarily preferred to receive petting from human guides by using a pairwise coordinated approach, even though this was more difficult, and total petting amount was thereby reduced. To explain why this bias occurs, the authors propose an integrated behavioral-evolutionary approach whereby performance is determined by two kinds of immediate outcomes: material gains and intrinsic affective states associated with cooperating. The latter can provide reinforcement when immediate material gains are reduced, delayed, or absent. Over a lifetime, this proximate mechanism can lead to cooperative relationships whose long-term ultimate consequences can be adaptive.},
url = {/files/dp508.pdf},
author = {Perelberg, Amir and Schuster, Richard}
}
@booklet {perry-cfpomt2009,
title = {Contracts for Providers of Medical Treatments},
journal = {Discussion Papers},
number = {516},
year = {2009},
month = {7},
abstract = {We analyze the nature of optimal contracts in a dynamic model of repeated (and persistent) adverse selection and moral hazard. In particular we consider the case of surgeons who diagnose patients and then decide whether to perform an operation, and if so, whether to exert a costly but unobservable effort. The probability of a successful operation is a function of the surgeon{\textquoteright}s effort, his quality, and the severity of the patient{\textquoteright}s problem, all of which are the surgeon{\textquoteright}s private information.The principal observes only the history of successes and failures and is allowed to promise financial rewards as a function of the observed history. His goal is to provide incentives at minimum cost so that if the patient needs minor surgery he will be treated by any type of surgeon (low- or high-quality) but if he needs major surgery, only a high-quality surgeon will perform the operation.The optimal contract-pair is characterized and is shown to reflect the practice often observed in the medical industry. Performing an operation is a gamble whose probability of success is higher, the higher the quality of the surgeon. A sequence of operations is exponentially less likely to be successful if the surgeon is not high-quality. An optimal contract for a high-quality surgeon exploits this fact by stipulating a high reward conditional on a long history of successes, while such a stipulation makes the contract much less attractive to a low-quality surgeon.},
url = {/files/dp516.pdf},
author = {Gershkov, Alex and Perry, Motty}
}
@booklet {schuster-cbibdtacipaue2009,
title = {Coordinated Breathing in Bottlenose Dolphins (Tursiops Truncatus) as Cooperation: Integrating Proximate and Ultimate Explanations},
journal = {Discussion Papers},
number = {507},
year = {2009},
month = {3},
publisher = {Journal of Comparative Psychology 122 (2) (2008), 109--120.},
abstract = {In this study, coordinated breathing was studied in 13 common bottlenose dolphins because of its links with spontaneous coordinated behaviors (e.g., swimming, foraging, and playing). A strong link was shown between dyadic coordination levels and age/sex categories when both association patterns and spatial formation are considered. This is consistent with a significant influence of social relationships on cooperating and contrasts with an economic perspective based on immediate material outcomes alone. This cooperation bias is explained by linking proximate processes that evoke performance with ultimate evolutionary processes driven by long-term adaptive outcomes. Proximate processes can include 2 kinds of immediate outcomes: material reinforcements and affective states associated with acts of cooperating that can provide positive reinforcement regardless of immediate material benefits (e.g., when there is a time lag between cooperative acts and material outcomes). Affective states can then be adaptive by strengthening social relationships that lead to eventual gains in fitness.},
url = {/files/dp507.pdf},
author = {Perelberg, Amir and Schuster, Richard}
}
@booklet {zultan-cocciarvcm2009,
title = {Cycles of Conditional Cooperation in a Real-Time Voluntary Contribution Mechanism},
journal = {Discussion Papers},
number = {511},
year = {2009},
month = {5},
publisher = {Games 2011, 2(1), 1-15.},
abstract = {This paper provides a new way to identify conditional cooperation in a real-time version of the standard voluntary contribution mechanism. Our approach avoids most drawbacks of the traditional procedures because it relies on endogenous cycle lengths, which are defined by the number of contributors a player waits before committing to a further contribution. Based on hypothetical distributions of randomly generated contribution sequences, we provide strong evidence for conditionally cooperative behavior. Moreover, notwithstanding a decline in contributions, conditional cooperation is found to be stable over time.},
url = {/files/dp511.pdf},
author = {Levati, M. Vittoria and Zultan, Ro{\textquoteright}i}
}
@booklet {lehmann-fonpt2009,
title = {Foundations of Non-Commutative Probability Theory},
journal = {Discussion Papers},
number = {514},
year = {2009},
month = {6},
abstract = {Kolmogorov{\textquoteright}s setting for probability theory is given an original generalization to account for probabilities arising from Quantum Mechanics. The sample space has a central role in this presentation and random variables, i.e., observables, are defined in a natural way. The mystery presented by the algebraic equations satisfied by (non-commuting) observables that cannot be observed in the same states is elucidated},
url = {/files/dp514.pdf},
author = {Daniel Lehmann}
}
@booklet {aumann-ge2009,
title = {Game Engineering},
journal = {Discussion Papers},
number = {518},
year = {2009},
month = {9},
publisher = {Transcript of the Lecture at Ko{\'z}mi{\'n}ski University in Warsaw, Poland, May 14, 2008.},
abstract = {"Game Engineering" deals with the application of game theoretic methods to interactive situations or systems in which the rules are well defined, or where the designer can himself specify the rules. This talk, which addressed a business-school audience with no specific knowledge of game theory, describes five examples of game engineering: two dealing with auctions, two with traffic systems, and one with arbitration. At the end of the talk there was a Q \& A session, which, too, is recorded here.},
url = {/files/dp518b.pdf},
author = {Robert J. Aumann}
}
@booklet {lehmann-gmlsotsoa2009,
title = {General Matching: Lattice Structure of the Set of Agreements},
journal = {Discussion Papers},
number = {501},
year = {2009},
month = {1},
abstract = {The subset agreement problem generalizes all forms of two-sided matching. Two agents need to agree on some subset of a given},
url = {/files/dp501.pdf},
author = {Matskin, Aron and Lehmann, Daniel}
}
@booklet {kalai-hqccf2009,
title = {How Quantum Computers Can Fail},
journal = {Discussion Papers},
number = {500},
year = {2009},
month = {1},
abstract = {We propose and discuss two postulates on the nature of errors in highly correlated noisy physical stochastic systems. The first postulate asserts that errors for a pair of substantially correlated elements are themselves substantially correlated. The second postulate asserts that in a noisy system with many highly correlated elements there will be a strong effect of error synchronization. These postulates appear to be damaging for quantum computers. The paper includes a self-contained description of the model of quantum computers.},
url = {/files/dp500.pdf},
author = {Gil Kalai}
}
@booklet {winter-ir2009,
title = {Incentive Reversal},
journal = {Discussion Papers},
number = {505},
year = {2009},
month = {2},
abstract = {By incentive reversal we refer to situations in which an increase of rewards for all agents results in fewer agents exerting effort. We show that externalities among peers may give rise to such intriguing situations even when all agents are fully rational. We provide a necessary and sufficient condition on the organizational technology in order for it to be susceptible to incentive reversal. The condition implies that some degree of complementarity is enough to allow incentive reversal.},
url = {/files/dp505.pdf},
author = {Winter, Eyal}
}
@booklet {levy-isgwpbii2009,
title = {Infinite Sequential Games with Perfect but Incomplete Information},
journal = {Discussion Papers},
number = {524},
year = {2009},
month = {11},
abstract = {Infinite sequential games, in which Nature chooses a Borel winning set and reveals it to one of the players, do not necessarily have a value if Nature has 3 or more choices. The value does exist if Nature has 2 choices. The value also does not necessarily exist if Nature chooses from 2 Borel payoff functions. Similarly, if Player 1 chooses the Borel winning set and does not reveal his selection to Player 2, then the game does not necessarily have a value if there are 3 or more choices; it does have a value if there are only 2 choices. If Player 1 chooses from 2 Borel payoff functions and does not reveal his choice, the game need not have a value either.},
url = {/files/dp524.pdf},
author = {Arieli, Itai and Levy, Yehuda (John)}
}
@booklet {romm-ieojbiea2009,
title = {Information Effects of Jump Bidding in English Auctions},
journal = {Discussion Papers},
number = {526},
year = {2009},
month = {12},
abstract = {Should an auctioneer start a rising auction from some starting price or set it as a reservation price? Under what circumstances might a bidder find it rational to raise the current offer by a substantial factor instead of making just a small increase above the highest bid? This paper aims to answer both of these questions by exploring the implications of jump bidding over the information sets available to the bidders. Our motivation is to find whether hiding the information about other players{\textquoteright} signals might be beneficial for one of the bidders. We first show that it is better for the auctioneer to set a reservation price rather than "jump" to the starting price. We then prove that in a very general setting and when bidders are risk-neutral there exist no equilibrium with jump bidding (in non-weakly dominated strategies). Finally, we demonstrate that jump bidding might be a rational consequence of risk aversion, and analyze the different effects at work.},
url = {/files/dp526.pdf},
author = {Lellouche, Dror and Romm, Assaf}
}
@booklet {gura-iigtaame2009,
title = {Insights into Game Theory: An Alternative Mathematical Experience},
journal = {Discussion Papers},
number = {503},
year = {2009},
month = {2},
publisher = {Quaderni Di Ricerca in Didattica 19 (2009), 172-183 (G.R.I.M. Department of Mathematics, University of Palermo, Italy)},
abstract = {Few branches of mathematics have been more influential in the social sciences than game theory. In recent years, it has become an essential tool for all social scientists studying the strategic behavior of competing individuals, firms, and countries. However, the mathematical complexity of game theory is often very intimidating for students who have only a basic understanding of mathematics. Insights into Game Theory addresses this problem by providing students with an understanding of the key concepts and ideas of game theory without using formal mathematical notation. The authors use four different topics (college admissions, social justice and majority voting, coalitions and cooperative games, and a bankruptcy problem from the Talmud) to investigate four areas of game theory. The result is a fascinating introduction to the world of game theory and its increasingly important role in the social sciences.},
url = {/files/db503.pdf},
author = {Ein-Ya Gura}
}
@booklet {winter-ibwattopefpb2009,
title = {Interactions Between Workers and the Technology of Production: Evidence from Professional Baseball},
journal = {Discussion Papers},
number = {506},
year = {2009},
month = {2},
publisher = {The Review of Economics and Statistics 91(1), 188--200.},
abstract = {This paper shows that workers can affect the productivity of their coworkers based on income maximization considerations, rather than relying on behavioral considerations such as peer pressure, social norms, and shame. We show that a worker{\textquoteright}s effort has a positive effect on the effort of coworkers if they are complements in production, and a negative effect if they are substitutes. The theory is tested using a panel data set of baseball players from 1970 to 2003. The results are consistent with the idea that the effort choices of workers interact in ways that are dependent on the technology of production.},
url = {/files/db506.pdf},
author = {Gould, Eric D. and Winter, Eyal}
}
@booklet {hellman-iecsacp2009,
title = {Iterated Expectations, Compact Spaces, and Common Priors},
journal = {Discussion Papers},
number = {522},
year = {2009},
month = {10},
publisher = {Games and Economic Behavior, 72 (2011), 163--171.},
abstract = {Extending to infinite state spaces that are compact metric spaces a result previously attained by D. Samet solely in the context of finite state spaces, a necessary and sufficient condition for the existence of a common prior for several players is given in terms of the players{\textquoteright} present beliefs only. A common prior exists if and only if for each random variable it is common knowledge that all Cesaro means of iterated expectations with respect to any permutation converge to the same value; this value is its expectation with respect to the common prior. It is further shown that compactness is a necessary condition for some of the results.},
url = {/files/db522.pdf},
author = {Ziv Hellman}
}
@booklet {zamir-lgwcss2009,
title = {Leadership Games with Convex Strategy Sets},
journal = {Discussion Papers},
number = {525},
year = {2009},
month = {11},
abstract = {A basic model of commitment is to convert a two-player game in strategic form to a leadership game with the same payoffs, where one player, the leader, commits to a strategy, to which the second player always chooses a best reply. This paper studies such leadership games for games with convex strategy sets. We apply them to mixed extensions of finite games, which we analyze completely, including nongeneric games. The main result is that leadership is advantageous in the sense that, as a set, the leader s payoffs in equilibrium are at least as high as his Nash and correlated equilibrium payoffs in the simultaneous game. We also consider leadership games with three or more players, where most conclusions no longer hold.},
url = {/files/db525.pdf},
author = {von Stengel, Bernhard and Zamir, Shmuel}
}
@booklet {sheshinski-laas2009,
title = {Longevity and Aggregate Savings},
journal = {Discussion Papers},
number = {519},
year = {2009},
month = {9},
abstract = {Two salient features of modern economic growth are the rise in aggregate savings rates and the steady increase in life expectancy. This paper links these processes, showing that under certain conditions economic theory supports the hypothesis that increased longevity leads to higher aggregate savings in steady state. The analysis is based on a lifecycle model with uncertain longevity in which individuals choose an optimum consumption path and a retirement age. Conditions on the age-specific pattern of improvements in survival probabilities are shown to ensure that individual savings rise with longevity and that aggregation preserves this result. Population theory (Coale (1972)) is used to link the steady-state age density function and the population{\textquoteright}s growth rate to individuals{\textquoteright} survival probabilities. The importance of a competitive annuity market in avoiding unintended bequests is underscored.},
url = {/files/db519.pdf},
author = {Eytan Sheshinski}
}
@booklet {shapira-mraaae2009,
title = {Managerial Reasoning About Aspirations and Expectations},
journal = {Discussion Papers},
number = {498},
year = {2009},
month = {1},
publisher = {Journal of Economic Behavior and Organization 66 (2008),60- 73},
abstract = {Managerial reasoning about performance targets and subsequent actions can be influenced by whether they focus their attention on expectations of future events or internal efforts to meet organizational goals. This study explores how managers think about expectations and aspirations by examining the semantic similarities and differences between these concepts for practicing managers and economists, the results suggesting subtle differences in how economists and managers reason about aspirations and expectations. For economists, the concept of expectations played a major role and influenced their subsequent thinking about goals and actions while managers conceptually separated factors that were controllable and uncontrollable, the concept of expectation not playing the central role for them. Implications for descriptive and prescriptive models of decision- making are discussed.},
url = {/files/db498.pdf},
author = {Lant, Theresa and Shapira, Zur}
}
@booklet {neyman-tmvomopargwii2009,
title = {Maximal Variation of Martingales of Probabilities and Repeated Games with Incomplete Information, The},
journal = {Discussion Papers},
number = {510},
year = {2009},
month = {4},
abstract = {The variation of a martingale m[k] of k+1 probability measures p(0),...,p(k) on a finite (or countable) set X is the expectation of the sum of ||p(t)-p(t-1)|| (the L one norm of the martingale differences p(t)-p(t-1)), and is denoted V(m[k]). It is shown that V(m[k]) is less than or equal to the square root of 2kH(p(0)), where H(p) is the entropy function (the some over x in X of p(x)log p(x) and log stands for the natural logarithm). Therefore, if d is the number of elements of X, then V(m[k]) is less than or equal to the square root of 2k(log d). It is shown that the order of magnitude of this bound is tight for d less than or equal to 2 to the power k: there is C>0 such that for every k and d less than or equal to 2 to the power k there is a martingale m[k]=p(0),...,p(k) of probability measures on a set X with d elements, and with variation V(m[k]) that is greater or equal the square root of Ck(log d). It follows that the difference between the value of the k-stage repeated game with incomplete information on one side and with d states, denoted v(k), and the limit of v(k), as k goes to infinity, is bounded by the maximal absolute value of a stage payoff times the square root of 2(log d)/k, and it is shown that the order of magnitude of this bound is tight.},
url = {/files/db510.pdf},
author = {Abraham Neyman}
}
@booklet {eyalwinter-meare2009,
title = {Mental Equilibrium and Rational Emotions},
journal = {Discussion Papers},
number = {521},
year = {2009},
month = {9},
abstract = {We introduce emotions into an equilibrium notion. In a mental equilibrium each player "selects" an emotional state which determines the player{\textquoteright}s preferences over the outcomes of the game. These preferences typically differ from the players{\textquoteright} material preferences. The emotional states interact to play a Nash equilibrium and in addition each player{\textquoteright}s emotional state must be a best response (with respect to material preferences) to the emotional states of the others. We discuss the concept behind the definition of mental equilibrium and show that this behavioral equilibrium notion organizes quite well the results of some of the most popular experiments in the experimental economics literature. We shall demonstrate the role of mental equilibrium in incentive mechanisms and will discuss the concept of collective emotions, which is based on the idea that players can coordinate their emotional states.},
url = {/files/db521.pdf},
author = {Eyal Winter and Ignacio Garcia-Jurado and Jose Mendez-Naya and Luciano Mendez-Naya}
}
@booklet {parimalkantibag-mvseacc2009,
title = {Multi-Stage Voting, Sequential Elimination and Condorcet Consistency},
journal = {Discussion Papers},
number = {504},
year = {2009},
month = {2},
abstract = {A class of voting procedures based on repeated ballots and elimination of one candidate in each round is shown to always induce an outcome in the top cycle and is thus Condorcet consistent, when voters behave strategically. This is an important class as it covers multi-stage, sequential elimination extensions of all standard one-shot voting rules (with the exception of negative voting), the same one-shot rules that would fail Condorcet consistency. The necessity of repeated ballots and sequential elimination are demonstrated by further showing that Condorcet consistency would fail in all standard voting rules that violate one or both of these conditions.},
url = {/files/db504.pdf},
author = {Bag, Parimal Kanti and Sabourian, Hamid and Winter, Eyal}
}
@booklet {weisel-pcacdi2009,
title = {Punishment, Cooperation, and Cheater Detection in {\textquotedblleft}Noisy{\textquotedblright} Social Exchange},
journal = {Discussion Papers},
number = {528},
year = {2009},
month = {12},
publisher = {Games 1 (1) (2010)},
abstract = {Explaining human cooperation in large groups of non-kin is a major challenge to both rational choice theory and the theory of evolution. Recent research suggests that group cooperation can be explained assuming that cooperators can punish non-cooperators or cheaters. The experimental evidence comes from economic games in which group members are informed about the behavior of all others and cheating occurs in full view. We demonstrate that under more realistic information conditions, where cheating is less obvious, punishment is ineffective in enforcing cooperation. Evidently, the explanatory power of punishment is constrained by the visibility of cheating.},
url = {/files/dp528b.pdf},
author = {Weisel, Ori and Bornstein, Gary}
}
@booklet {tamarkeasar-ragfafpfmafomgb2009,
title = {Red Anemone Guild Flowers as Focal Places for Mating and Feeding of Mediterranean Glaphyrid Beetles},
journal = {Discussion Papers},
number = {515},
year = {2009},
month = {7},
abstract = {Several species of glaphyrid beetles forage and mate on Mediterranean red flowers. In red anemones and poppies in Israel, female beetles occupy only a subset of the bowl-shaped flowers, do not aggregate, and are hidden below the petals. This raises the question how males find their mates. The possibility that males and females orient to similar plant- generated cues, thereby increasing their mate encounter prospects, was investigated. Beetle attraction to red models increased with display area in previous studies. Choice tests with flowers and with models indicate that both male and female beetles prefer large displays to smaller ones. In anemones, beetles rest, feed and mate mainly on male- phase flowers, which are larger than female- phase flowers. Poppies that contain beetles are larger than the population average. These findings support the hypothesis that males and females meet by orienting to large red displays. Corolla size correlates with pollen reward in both plant species, suggesting that visits to large flowers also yield foraging benefits. Male beetles often jump rapidly among adjacent flowers. In contrast to the preference for large flowers by stationary individuals, these jump sequences are random with respect to flower sex-phase (in anemone) and size (in poppy). They may enable males to detect females at close range. We hypothesize that males employ a mixed mate- searching strategy, combining orientation to floral signals and to female- produced cues. The glaphyrids{\textquoteright} preference for large flowers may have selected for extraordinarily large displays within the "red anemone" pollination guild of the Levant.},
url = {/files/dp515.pdf},
author = {Tamar Keasar and Ally R. Harar and Guido Sabatinelli and Denis Keith and Amots Dafni and Ofrit Shavit and Assaph Zylbertal and Avi Shmida}
}
@booklet {bavli-ratefohd2009,
title = {Rule-Rationality and the Evolutionary Foundations of Hyperbolic Discounting},
journal = {Discussion Papers},
number = {513},
year = {2009},
month = {6},
abstract = {Recent studies involving intertemporal choice have prompted many economists to abandon the classical exponential discount utility function in favor of one characterized by hyperbolic discounting. Hyperbolic discounting, however, implies a reversal of preferences over time that is often described as dynamically inconsistent and ultimately irrational. We analyze hyperbolic discounting and its characteristic preference reversal in the context of rule-rationality, an evolutionary approach to rationality that proposes that people do not maximize utility in each of their acts; rather, they adopt rules of behavior that maximize utility in the aggregate, over all decisions to which an adopted rule applies. In this sense, people maximize over rules rather than acts. Rule-rationality provides a framework through which we may examine the rational basis for hyperbolic discounting in fundamental terms, and in terms of its evolutionary foundations. We conclude that although aspects of hyperbolic discounting may contain a certain destructive potential, it is likely that its evolutionary foundations are sound {\textendash} and its application may well be as justified and rational today as it was for our foraging ancestors.},
url = {/files/dp513b.pdf},
author = {Hillel Bavli}
}
@booklet {samuel-cahn-tspomerassawg2009,
title = {Secretary Problem of Minimizing Expected Rank: A Simple Suboptimal Approach with Generalizations, The},
journal = {Discussion Papers},
number = {502},
year = {2009},
month = {1},
publisher = {Advances in Applied Probability (2009) 41, P. 1041-1058.},
abstract = {The secretary problem for selecting one item so as to minimize its expected rank, based on observing the relative ranks only, is revisited. A simple suboptimal rule, which performs almost as well as the optimal rule, is given. The rule stops with the smallest i such that Ri},
url = {/files/dp502b.pdf},
author = {Krieger, Abba M. and Samuel-Cahn, Ester}
}
@booklet {hart-asrolttaior2009,
title = {A Simple Riskiness Order Leading to the Aumann{\textendash}Serrano Index of Riskiness},
journal = {Discussion Papers},
number = {517},
year = {2009},
month = {8},
abstract = {We introduce a simple "riskier than" order between gambles, from which the index of riskiness developed by Aumann and Serrano (2008) is directly obtained.},
url = {http://www.ma.huji.ac.il/hart/publ.html$\#$risk-as},
author = {Sergiu Hart}
}
@booklet {jaybartroff-tsrastrftcbp2009,
title = {Spend-It-All Region and Small Time Results for the Continuous Bomber Problem, The},
journal = {Discussion Papers},
number = {509},
year = {2009},
month = {4},
publisher = {Sequential Analysis, (2010) Vol. 29, Pages 275-291.},
abstract = {A problem of optimally allocating partially effective ammunition x to be used on randomly arriving enemies in order to maximize an aircraft{\textquoteright}s probability of surviving for time t, known as the Bomber Problem, was first posed by Klinger and Brown (1968). They conjectured a set of apparently obvious monotonicity properties of the optimal allocation function K(x,t). Although some of these conjectures, and versions thereof, have been proved or disproved by other authors since then, the remaining central question, that K(x,t) is nondecreasing in x, remains unsettled. After reviewing the problem and summarizing the state of these conjectures, in the setting where x is continuous we prove the existence of a "spend-it-all" region in which K(x,t) = x and find its boundary, inside of which the long-standing, unproven conjecture of monotonicity of K(.,t) holds. A new approach is then taken of directly estimating K(x,t) for small t, providing a complete small-t asymptotic description of K(x,t) and the optimal probability of survival.},
url = {/files/dp509.pdf},
author = {Jay Bartroff and Larry Goldstein and Yosef Rinott and Ester Samuel-Cahn}
}
@booklet {levy-sgwil2009,
title = {Stochastic Games with Information Lag},
journal = {Discussion Papers},
number = {499},
year = {2009},
month = {1},
abstract = {Two-player zero-sum stochastic games with finite state and action spaces, as well as two-player zero-sum absorbing games with compact metric action spaces, are known to have undiscounted values. We study such games under the assumption that one or both players observe the actions of their opponent after some time-dependent delay. We develop criteria for the rate of growth of the delay such that a player subject to such an information lag can still guarantee himself in the undiscounted game as much as he could have with perfect monitoring. We also demonstrate that the player in the Big Match with the absorbing action subject to information lags which grow too rapidly, according to certain criteria, will not be able to guarantee as much as he could have in the game with perfect monitoring.},
url = {/files/dp499.pdf},
author = {Yehuda (John) Levy}
}
@booklet {sheshinski-ulaiie2009,
title = {Uncertain Longevity and Investment in Education},
journal = {Discussion Papers},
number = {520},
year = {2009},
month = {9},
abstract = {It has been argued that increased life expectancy raises the rate of return on education, causing a rise in the investment in education followed by an increase in lifetime labor supply. Empirical evidence of these relations is rather weak. Building on a lifecycle model with uncertain longevity, this paper shows that increased life expectancy does not suffice to warrant the above hypotheses. We provide assumptions about the change in survival probabilities, specifically about the age dependence of hazard rates, which determine individuals{\textquoteright} behavioral response w.r.t. education, work and age of retirement. Comparison is made between the case when individuals have access to a competitive annuity market and the case of no insurance.},
url = {/files/dp520b.pdf},
author = {Eytan Sheshinski}
}
@booklet {neyman-tvotzrgwiiaud2009,
title = {The Value of Two-Person Zero-Sum Repeated Games with Incomplete Information and Uncertain Duration},
journal = {Discussion Papers},
number = {512},
year = {2009},
month = {5},
abstract = {It is known that the value of a zero-sum infinitely repeated game with incomplete information on both sides need not exist [Aumann Maschler 95]. It is proved that any number between the minmax and the maxmin of the zero-sum infinitely repeated game with incomplete information on both sides is the value of the long finitely repeated game where players{\textquoteright} information about the uncertain number of repetitions is asymmetric.},
url = {/files/dp512.pdf},
author = {Neyman, Abraham}
}
@booklet {zultan-apcafs2008,
title = {Altruism, Partner Choice, and Fixed-Cost Signaling},
journal = {Discussion Papers},
number = {483},
year = {2008},
month = {5},
abstract = {We consider a multitype population model with unobservable types, in which players are engaged in the {\textquoteleft}mutual help{\textquoteright} game: each player can increase her partner{\textquoteright}s fitness at a cost to oneself. All individuals prefer free riding to cooperation, but some of them, helpers, can establish reciprocal cooperation in a long-term relationship. Such heterogeneity can drive cooperation through a partner selection mechanism under which helpers choose to interact with one another and shun non-helpers. However, in contrast to the existing literature, we assume that each individual is matched with an anonymous partner, and therefore, stable cooperation cannot be achieved by partner selection per se. We suggest that helpers can signal their type to one another in order to establish long-term relationships, and we show that a reliable signal always exists. Moreover, due to the difference in future benefits of a long-term relationship for helpers and non-helpers, the signal need not be a handicap, in the sense that the cost of the signal need not be correlated with type.},
url = {/files/dp483.pdf},
author = {Zapechelnyuk, Andriy and Zultan, Ro{\textquoteright}i}
}
@booklet {zamir-bggwii2008,
title = {Bayesian Games: Games with Incomplete Information},
journal = {Discussion Papers},
number = {486},
year = {2008},
month = {6},
publisher = {Encyclopedia of Complexity and System Science, Bob Meyers (ed.), Springer (forthcoming)},
abstract = {An encyclopedia article on games with incomplete information. Table of contents: 1. Definition of the subject and its importance 2. Introduction: Modeling incomplete information 3. Harsanyi{\textquoteright}s model: The notion of type 4. Aumann{\textquoteright}s model 5. Harsanyi{\textquoteright}s model and the hierarchies of beliefs 6. The Universal Belief Space 7. Belief subspaces 8. Consistent beliefs and Common prior 9. Bayesian games and Bayesian equilibrium 10. Bayesian equilibrium and Correlated equilibrium 11. Concluding remarks and future directions 12. Bibliography},
url = {/files/dp486.pdf},
author = {Zamir, Shmuel}
}
@booklet {zamir-cjttdc2008,
title = {Condorcet Jury Theorem: The Dependent Case},
journal = {Discussion Papers},
number = {477},
year = {2008},
month = {3},
abstract = {We provide an extension of the Condorcet Theorem. Our model includes both the Nitzan-Paroush framework of unequal competencies and Ladha{\textquoteright}s model of correlated voting by the jurors. We assume that the jurors behave informatively, that is, they do not make a strategic use of their information in voting. Formally, we consider a sequence of binary random variables X = (X1, X2, ..., Xn, ...) with range in \{0,1\} and a joint probability distribution P. The pair (X,P) is said to satisfy the Condorcet Jury Theorem (CJT) if $\lim_{n\to\infty} P(\sum_{i=1}^{n} X_i > n/2) = 1$. For a general (dependent) distribution P we provide necessary as well as sufficient conditions for the CJT. Let $p_i = E(X_i)$},
url = {/files/dp477.pdf},
author = {Peleg, Bezalel and Zamir, Shmuel}
}
@booklet {mas-colell-cgisf2008,
title = {Cooperative Games in Strategic Form},
journal = {Discussion Papers},
number = {484},
year = {2008},
month = {5},
abstract = {In this paper we view bargaining and cooperation as an interaction superimposed on a strategic form game. A multistage bargaining procedure for N players, the "proposer commitment" procedure, is presented. It is inspired by Nash{\textquoteright}s two-player variable-threat model; a key feature is the commitment to "threats." We establish links to classical cooperative game theory solutions, such as the Shapley value in the transferable utility case. However, we show that even in standard pure exchange economies the traditional coalitional function may not be adequate when utilities are not transferable.},
url = {/files/st-val.html},
author = {Hart, Sergiu and Mas-Colell, Andreu}
}
@booklet {kahana-teccfjr2008,
title = {The Easy Core Case for Judicial Review},
journal = {Discussion Papers},
number = {489},
year = {2008},
month = {9},
abstract = {This paper defends judicial review on the grounds that judicial review is necessary for protecting a right to a hearing. Judicial review is praised by its advocates on the basis of instrumentalist reasons, i.e., because of its desirable contingent consequences such as protecting rights, promoting democracy, maintaining stability, etc. We argue that instrumentalist reasons for judicial review are bound to fail and that an adequate defense of judicial review requires justifying judicial review on non-instrumentalist grounds. A non-instrumentalist justification grounds judicial review in essential attributes of the judicial process. In searching for a non-instrumental justification we establish that judicial review is designed to protect the right to a hearing. The right to a hearing consists of three components: the opportunity to voice a grievance, the opportunity to be provided with a justification for a decision that impinges (or may have impinged) on one{\textquoteright}s rights and, last, the duty to reconsider the initial decision giving rise to the grievance. The right to a hearing is valued independently of the merit of the decisions generated by the judicial process. We also argue that the recent proposals to reinforce popular or democratic participation in shaping the Constitution are wrong because they are detrimental to the right to a hearing.},
url = {/files/dp489.pdf},
author = {Harel, Alon and Kahana, Tsvi}
}
@booklet {kalai-eacs2008,
title = {Economics and Common Sense},
journal = {Discussion Papers},
number = {485},
year = {2008},
month = {5},
abstract = {A review of Steven E. Landsburg{\textquoteright}s book More Sex is Safer Sex, the Unconventional Wisdom of Economics. The surprise 2005 best seller Freakonomics by Steven Levitt and Stephen Dubner launched a small genre of books by economists applying economic reasoning to everyday life and finding counterintuitive results. Mathematician and economist Steven Landsburg, whose online Slate column {\textquoteleft}{\textquoteleft}Everyday Economics{\textquoteright}{\textquoteright} predates the Levitt and Dubner volume, has now collected and expanded some of those columns to form the basis of his new book. In his book, Landsburg uses the {\textquoteleft}{\textquoteleft}weapons of evidence and logic, especially the logic of economics{\textquoteright}{\textquoteright} to draw surprising conclusions which run against common sense. {\textquoteleft}{\textquoteleft}If your common sense tells you otherwise,{\textquoteright}{\textquoteright} says Landsburg, {\textquoteleft}{\textquoteleft}remember that common sense also tells you the Earth is flat.{\textquoteright}{\textquoteright} In this review, scheduled to appear in the June/July 2008 issue of the Notices of the American Mathematical Society, we describe and discuss some of the issues and claims raised in Landsburg{\textquoteright}s book. For further discussion see the May 29 post in http://gilkalai.wordpress.com/ .},
url = {/files/dp485.pdf},
author = {Kalai, Gil}
}
@booklet {ehudfriedgut-ecbmo2008,
title = {Elections Can Be Manipulated Often},
journal = {Discussion Papers},
number = {481},
year = {2008},
month = {4},
abstract = {The Gibbard-Satterthwaite theorem states that every non-trivial voting method between at least 3 alternatives can be strategically manipulated. We prove a quantitative version of the Gibbard-Satterthwaite theorem: a random manipulation by a single random voter will succeed with non-negligible probability for every neutral voting method between 3 alternatives that is far from being a dictatorship.},
url = {/files/dp481.pdf},
author = {Friedgut, Ehud and Kalai, Gil and Nisan, Noam}
}
@booklet {abbamkrieger-emsfoaqga22008,
title = {Extreme(ly) Mean(ingful): Sequential Formation of a Quality Group (Revised April 2009)},
journal = {Discussion Papers},
number = {478},
year = {2008},
month = {3},
publisher = {Annals of Applied Probability, (2010) Vol. 20, 2261-2294.},
abstract = {The present paper studies the limiting behavior of the average score of a sequentially selected group of items or individuals, the underlying distribution of which, F, belongs to the Gumbel domain of attraction of extreme value distribution. This class contains the Normal, log Normal, Gamma, Weibull and many other distributions. The selection rules are the better than average ($\beta$ = 1) and the $\beta$-better than average rule, defined as follows. After the first item is selected, another item is admitted into the group if and only if its score is greater than $\beta$ times the average score of those already selected. Denote by Yk the average of the k first selected items, and by Tk the time it takes to amass them. Some of the key results obtained are: Under mild conditions, for the better than average rule, Yk less a suitable chosen function of log k converges almost surely to a finite random variable. When $1 - F(x) = \exp(-[x^{\alpha} + h(x)])$, $\alpha > 0$ and $h(x)/x^{\alpha} \to 0$ as $x \to \infty$, then Tk is of approximate order $k^2$. When $\beta > 1$, the asymptotic results for Yk are of a completely different order of magnitude. Interestingly, for a class of distributions, Tk, suitably normalized, asymptotically approaches 1, almost surely for relatively small $\beta > 1$, in probability for moderate sized $\beta$ and in distribution when $\beta$ is large.},
url = {/files/dp478b.pdf},
author = {Krieger, Abba M. and Pollak, Moshe and Samuel-Cahn, Ester}
}
@booklet {yehoramleshem-fnpitphsshprac2008,
title = {Female-Biased Nectar Production in the Protandrous, Hermaphroditic Shrub Salvia Hierosolymitana: Possible Reasons and Consequences},
journal = {Discussion Papers},
number = {494},
year = {2008},
month = {12},
abstract = {Sexual selection theory states that male reproductive success is commonly limited by opportunities for fertilization, while female reproductive success is more often resource-limited. This creates higher selective pressure on males to attract mating partners as compared with females. Similar reasoning, when applied to plant reproduction, predicts higher investment in pollinator-attracting traits, such as nectar production, in male flowers than in female flowers. Contrary to this prediction, nectar production by female-phase flowers in the protandrous hermaphrodite shrub Salvia hierosolymitana (Boiss.) (Lamiaceae) was significantly higher than in male-phase flowers in two populations over three years. Female-biased nectar production may reflect selection for pollinator attraction by female-phase flowers, possibly due to pollen limitation. In support of this interpretation, (a) the number of pollen grains on female-phase stigmas was substantially higher than on male-phase stigmas, suggesting that the female phase received more insect visits ; (b) the number of germinating pollen grains in female-phase styles only slightly exceeded the number of ovules per flower, therefore pollen availability may restrict female fitness. Proportions of female-phase flowers decreased from the base of the inflorescences towards their top. This creates a vertical gradient of nectar production, which may help reduce geitonogamous pollination by affecting pollinator behavior.},
url = {/files/dp494.1.pdf},
author = {Leshem, Yehoram and Keasar, Tamar and Shmida, Avi}
}
@booklet {zultan-jmsajs2008,
title = {Job Market Signaling and Job Search},
journal = {Discussion Papers},
number = {488},
year = {2008},
month = {7},
abstract = {The high cost of searching for employers borne by prospective employees increases friction in the labor market and inhibits formation of efficient employer-employee relationships. It is conventionally agreed that mechanisms that reduce the search costs (e.g., internet portals for job search) lower unemployment and improve overall welfare. We demonstrate that a reduction of the search costs may have the converse effect. We consider a labor market in which workers can either establish a long-term relationship with an employer by being productive, or shirk and move from one employer to the next. In addition, the workers can signal to a potential employer their intention to be productive. We show that lower search costs lead to fewer employees willing to exert effort and, in a separating equilibrium, to more individuals opting to stay completely out of the job market and remain unemployed. Furthermore, we show that lower search costs not only deteriorate the market composition, but also impair efficiency by leading to more expensive signaling in a separating equilibrium.},
url = {/files/dp488.pdf},
author = {Zapechelnyuk, Andriy and Zultan, Ro{\textquoteright}i}
}
@booklet {neyman-leams2008,
title = {Learning Effectiveness and Memory Size},
journal = {Discussion Papers},
number = {476},
year = {2008},
month = {2},
url = {/files/dp476.1.pdf},
author = {Neyman, Abraham}
}
@booklet {fink-majhfecitpottdpo2008,
title = {Manipulating Allocation Justice: How Framing Effects Can Increase the Prevalence of the Talmudic Division Principle "Shnaim Ohazin"},
journal = {Discussion Papers},
number = {479},
year = {2008},
month = {4},
abstract = {In the role of judges in bankruptcy problems, people may prescribe various just divisions of the available goods to claimants who have rights for them. Two widespread division rules are equality and proportionality. A less known rule is the Talmudic "Shnaim Ohazin" principle, whose basic rationale is applying an equal division only to that part of the goods which is genuinely under dispute. This paper demonstrates that the ratio of subjects that prefer the "Shnaim Ohazin" principle over equality and proportionality can be increased by a simple framing manipulation. These results suggest that framing effects might be a prevalent factor in the realm of distributive justice.},
url = {/files/dp479.pdf},
internal-note = {Author list reconstructed; original field read "Fink, Yevgeni Berzak and Michael, Howlett," - verify against DP 479 title page whether a third author (Howlett) exists.},
author = {Berzak, Yevgeni and Fink, Michael}
}
@booklet {robertjaumann-mmim2008,
title = {Michael Maschler: In Memoriam},
journal = {Discussion Papers},
number = {493},
year = {2008},
month = {11},
publisher = {Games and Economic Behavior 2, (2008), 351-392},
url = {/files/dp493b.pdf},
author = {Aumann, Robert J. and Gura, Ein-Ya and Hart, Sergiu and Peleg, Bezalel and Shemesh, Hana and Zamir, Shmuel}
}
@booklet {hart-nead2008,
title = {Nash Equilibrium and Dynamics},
journal = {Discussion Papers},
number = {490},
year = {2008},
month = {9},
publisher = {Games and Economic Behavior 71 (2011), 6-8},
abstract = {John F. Nash, Jr., submitted his Ph.D. dissertation entitled Non-Cooperative Games to Princeton University in 1950. Read it 58 years later, and you will find the germs of various later developments in game theory. Some of these are presented below, followed by a discussion concerning dynamic aspects of equilibrium.},
url = {/files/nash.html},
author = {Hart, Sergiu}
}
@booklet {tomdejong-osaipateom2008,
title = {Optimal Sex Allocation in Plants and the Evolution of Monoecy},
journal = {Discussion Papers},
number = {487},
year = {2008},
month = {6},
abstract = {Question: Which ecological factors favor the transition from plants with hermaphrodite flowers to monoecious plants with separate male and female flowers on the same individual? Mathematical methods: ESS computation in sex allocation models. Key assumptions: Within a flower, costs of attraction, pollen production, style/ovary and fruit with seeds are assumed fixed. Often costs of fruit with seeds outweigh other costs. Female flowers produce more seeds than hermaphrodite flowers, due to less pollen-stigma interference. Conclusions: When sex allocation is female-biased at the flower level, plants respond by producing either male flowers or flowers without fruit. Hermaphroditism evolves to andromonoecy (male and hermaphrodite flowers on the same plant) and then to monoecy. In species with large fruits, sex allocation is female-biased at the flower level and the production of male flowers is favored. This facilitates the production of female flowers. The alternative route via gynomonoecy (female and hermaphrodite flowers on the same plant) is improbable since it requires unrealistically high levels of seed production in female flowers. Monoecious species are likely to have: (i) small, inexpensive flowers, (ii) large, costly fruits and seeds, and (iii) high fertilization rates.},
url = {/files/dp487.pdf},
author = {de Jong, Tom and Shmida, Avi and Thuijsman, Frank}
}
@booklet {tamarkeasar-peotrachmsfef2008,
title = {Pollination Ecology of the Red Anemone Coronaria (Ranunculaceae): Honeybees May Select for Early Flowering},
journal = {Discussion Papers},
number = {491},
year = {2008},
month = {11},
abstract = {Large red bowl-shaped flowers characterize the Mediterranean poppy guild plants, and were suggested to reflect convergence for beetle pollination. However, the earliest-blooming species in this guild, Anemone coronaria (L.), starts flowering about a month before beetle emergence. Early flowering can be adaptive if the plant receives sufficient pollination by other means during this period. We investigated A. coronaria{\textquoteright}s pollination prospects throughout its flowering season by monitoring its flowering phenology, the composition of the surrounding insect community, and insect visitors. Clear protogyny precluded self pollination, and anthesis occurred gradually over several days. Released pollen was quickly collected by insects, suggesting no major role for wind pollination. Beetles, flies and bees were trapped at the study site throughout the flowering period. Honeybees were the main anemone visitors during the first seven weeks of flowering, and were joined by Glaphyrid beetles in the remaining three weeks. Early- and late-blooming flowers had similar female reproductive success. We propose that effective pollination by honeybees may allow anemones to bloom in early spring and thereby reduce competition for pollinators with later-blooming species. Our results support previous evidence for pollination of red flowers by bees, and for the importance of generalization in pollination interactions in heterogeneous environments.},
url = {/files/dp491.pdf},
author = {Keasar, Tamar and Shmida, Avi and Zylbertal, Asaph}
}
@booklet {arieli-ricg2008,
title = {Rationalizability in Continuous Games},
journal = {Discussion Papers},
number = {495},
year = {2008},
month = {12},
abstract = {Define a continuous game to be one in which every player{\textquoteright}s strategy set is a Polish space, and the payoff},
url = {/files/dp495.pdf},
author = {Arieli, Itai}
}
@booklet {aumann-rva2008,
title = {Rule-Rationality Versus Act-Rationality},
journal = {Discussion Papers},
number = {497},
year = {2008},
month = {12},
abstract = {People{\textquoteright}s actions often deviate from rationality, i.e., self-interested behavior. We propose a paradigm called rule-rationality, according to which people do not maximize utility in each of their acts, but rather follow rules or modes of behavior that usually-but not always-maximize utility. Specifically, rather than choosing an act that maximizes utility among all possible acts in a given situation, people adopt rules that maximize average utility among all applicable rules, when the same rule is applied to many apparently similar situations. The distinction is analogous to that between Bentham{\textquoteright}s "act-utilitarianism" and the "rule-utilitarianism" of Mill, Harsanyi, and others. The genesis of such behavior is examined, and examples are given. The paradigm may provide a synthesis between rationalistic neo-classical economic theory and behavioral economics.},
url = {/files/dp497.pdf},
author = {Aumann, Robert J.}
}
@booklet {lehmann-sstlgoqp2008,
title = {Similarity-Projection Structures: The Logical Geometry of Quantum Physics},
journal = {Discussion Papers},
number = {482},
year = {2008},
month = {5},
abstract = {Similarity-Projection structures abstract the numerical properties of real scalar product of rays and projections in Hilbert spaces to provide a more general framework for Quantum Physics. They are characterized by properties that possess direct physical meaning. They provide a formal framework that subsumes both classical boolean logic concerned with sets and subsets and quantum logic concerned with Hilbert space, closed subspaces and projections. They shed light on the role of the phase factors that are central to Quantum Physics. The generalization of the notion of a self-adjoint operator to SP-structures provides a novel notion that is free of linear algebra.},
url = {/files/dp482.pdf},
author = {Lehmann, Daniel}
}
@booklet {ilanyaniv-scaorwmpbmcitlaj2008,
title = {Spurious Consensus and Opinion Revision: Why Might People Be More Confident in Their Less Accurate Judgments?},
journal = {Discussion Papers},
number = {492},
year = {2008},
month = {11},
publisher = {Journal of Experimental Psychology: Learning, Memory, and Cognition (forthcoming)},
abstract = {In the interest of improving their decision-making, individuals revise their opinions on the basis of samples of opinions obtained from others. However, such a revision process may lead decision-makers to experience greater confidence in their less accurate judgments. We theorize that people tend to underestimate the informative value of independently drawn opinions, if these appear to conflict with one another, yet place some confidence even in the "spurious consensus" which may arise when opinions are sampled interdependently. The experimental task involved people{\textquoteright}s revision of their opinions (caloric estimates of foods) on the basis of advice. The method of sampling the advisory opinions (independent or interdependent) was the main factor. The results reveal a dissociation between confidence and accuracy. A theoretical underlying mechanism is suggested whereby people attend to consensus (consistency) cues at the expense of information on interdependence. Implications for belief-updating and for individual and group decisions are discussed.},
url = {/files/dp492.pdf},
author = {Yaniv, Ilan and Choshen-Hillel, Shoham and Milyavsky, Maxim}
}
@booklet {gorodeisky-saodd2008,
title = {Stochastic Approximation of Discontinuous Dynamics},
journal = {Discussion Papers},
number = {496},
year = {2008},
month = {12},
abstract = {We consider stochastic dynamics whose expected (average) vector field is not necessarily continuous. We generalize the ordinary differential equation method for analyzing stochastic processes to this case, by introducing leading functions that {\textquoteleft}lead{\textquoteright} the stochastic process across the discontinuities, which yields approximation results for the asymptotic behavior of the stochastic dynamic. We then apply the approximation results to the classical best-response dynamics used in game theory.},
url = {/files/dp496.pdf},
author = {Gorodeisky, Ziv}
}
@booklet {arieli-tacore2008,
title = {Towards a Characterization of Rational Expectations},
journal = {Discussion Papers},
number = {475},
year = {2008},
month = {2},
abstract = {R. J. Aumann and J. H. Dr{\`e}ze (2008) define a rational expectation of a player i in a game G as the expected payoff of some type of i in some belief system for G in which common knowledge of rationality and common priors obtain. Our goal is to characterize the set of rational expectations in terms of the game{\textquoteright}s payoff matrix. We provide such a characterization for a specific class of strategic games, called semi-elementary, which includes Myerson{\textquoteright}s "elementary" games.},
url = {/files/dp475.pdf},
author = {Arieli, Itai}
}
@booklet {ullmann-margalit-tbbotciotcitk2008,
title = {"We the Big Brother" Or The Curious Incident of the Camera in the Kitchen},
journal = {Discussion Papers},
number = {480},
year = {2008},
month = {4},
publisher = {Published as "The Case of the Camera in the Kitchen: Surveillance, Privacy, Sanctions and Governance", Regulation \& Governance 2 (2008), 425-444},
abstract = {Last summer, a member of the Rationality Center at the Hebrew University of Jerusalem installed a closed-circuit TV camera in the Center{\textquoteright}s kitchen. An email explained that the camera was installed in an effort to keep the kitchen clean. By the time the camera was removed, a week later, the members of the Center exchanged close to 120 emails among themselves, expressing their opinions for and against the camera and discussing related issues. Taking off from this exchange, I explore the surprisingly rich set of normative concerns touched upon by the kitchen-camera incident. These include a host of issues regarding people{\textquoteright}s polarized attitudes toward public surveillance, the problem of the invasive gaze and the argument that "if you have nothing to hide you have nothing to worry," the efficacy of disciplining behavior through sanctions along with the problems related to shaming sanctions, the notion of privacy and its arguable relevance to the kitchen case, and more. Special attention is given to the notion of cleanness and to its related norms. In an epilogue, I offer some reflections in the wake of the incident. I find that it is precisely the smallness, concreteness and seeming triviality of this incident that helps bring a large set of interconnected, vexing normative concerns into sharper relief.},
url = {/files/dp480.pdf},
author = {Ullmann-Margalit, Edna}
}
@booklet {jean-francoismertens-agwcas2007,
title = {Absorbing Games with Compact Action Spaces},
journal = {Discussion Papers},
number = {456},
year = {2007},
month = {7},
abstract = {We prove that games with absorbing states with compact action sets have a value.},
url = {/files/dp456.pdf},
author = {Mertens, Jean-Fran{\c{c}}ois and Neyman, Abraham and Rosenberg, Dinah}
}
@booklet {arieldprocaccia-aaiodaye2007,
title = {Approximability and Inapproximability of Dodgson and Young Elections},
journal = {Discussion Papers},
number = {466},
year = {2007},
month = {10},
abstract = {The voting rules proposed by Dodgson and Young are both designed to find the candidate closest to being a Condorcet winner, according to two different notions of proximity; the score of a given candidate is known to be hard to compute under both rules. In this paper, we put forward an LP-based randomized rounding algorithm which yields an O(log m) approximation ratio for the Dodgson score, where m is the number of candidates. Surprisingly, we show that the seemingly simpler Young score is NP-hard to approximate by any factor.},
url = {/files/dp466.pdf},
author = {Procaccia, Ariel D. and Feldman, Michal and Rosenschein, Jeffrey S.}
}
@booklet {karni-bdtatrob2007,
title = {Bayesian Decision Theory and the Representation of Beliefs},
journal = {Discussion Papers},
number = {444},
year = {2007},
month = {1},
abstract = {In this paper, I present a Bayesian decision theory and define choice-based subjective probabilities that faithfully represent Bayesian decision makers{\textquoteright} prior and posterior beliefs regarding the likelihood of the possible effects contingent on his actions. I argue that no equivalent results can be obtained in Savage{\textquoteright}s (1954) subjective expected utility theory and give an example illustrating the potential harm caused by ascribing to a decision maker subjective probabilities that do not represent his beliefs.},
url = {/files/dp444.pdf},
author = {Karni, Edi}
}
@booklet {abbamkrieger-btmbta2007,
title = {Beat the Mean: Better the Average},
journal = {Discussion Papers},
number = {469},
year = {2007},
month = {11},
publisher = {Journal of Applied Probability 45 (2008), 244-259},
abstract = {We consider a sequential rule, where an item is chosen into the group, such as a university faculty member, only if his score is better than the average score of those already belonging to the group. We study four variables: The average score of the members of the group after k items have been selected, the time it takes (in terms of number of observed items) to assemble a group of k items, the average score of the group after n items have been observed, and the number of items kept after the first n items have been observed. We develop the relationships between these variables, and obtain their asymptotic behavior as k (respectively, n) tends to infinity. The assumption throughout is that the items are independent, identically distributed, with a continuous distribution. Though knowledge of this distribution is not needed to implement the selection rule, the asymptotic behavior does depend on the distribution. We study in some detail the Exponential, Pareto and Beta distributions. Generalizations of the "better than average" rule to the $\beta$-better than average rules are also considered. These are rules where an item is admitted to the group only if its score is better than $\beta$ times the present average of the group, where $\beta > 0$.},
url = {/files/dp469.pdf},
author = {Krieger, Abba M. and Pollak, Moshe and Samuel-Cahn, Ester}
}
@booklet {zapechelnyuk-bswbr2007,
title = {Better-Reply Strategies with Bounded Recall},
journal = {Discussion Papers},
number = {449},
year = {2007},
month = {3},
abstract = {A decision maker (an agent) is engaged in a repeated interaction with Nature. The objective of the agent is to guarantee to himself the long-run average payoff as large as the best-reply payoff to Nature{\textquoteright}s empirical distribution of play, no matter what Nature does. An agent with perfect recall can achieve this objective by a simple better-reply strategy. In this paper we demonstrate that the relationship between perfect recall and bounded recall is not straightforward: An agent with bounded recall may fail to achieve this objective, no matter how long recall he has and no matter what better-reply strategy he employs.},
url = {/files/dp449.pdf},
author = {Zapechelnyuk, Andriy}
}
@booklet {bornstein-acogbpt2007,
title = {A Classification of Games by Player Type},
journal = {Discussion Papers},
number = {443},
year = {2007},
month = {1},
publisher = {New Issues and Paradigms in Research on Social Dilemmas, A. Biel, D. Eek, T. G{\"a}rling, \& M. Gustafsson (Eds.), Springer Verlag, (in Press)},
abstract = {In this paper I classify situations of interdependent decision-making, or games based on the type of decision-makers, or players involved. The classification builds on a distinction between three basic types of decision-making agents: individuals, cooperative or unitary groups {\textendash} groups whose members can reach a binding (and costless) agreement on a joint strategy {\textendash} and non-cooperative groups {\textendash} groups whose members act independently without being able to make a binding agreement. Pitting individuals, unitary groups, and non-cooperative groups against one another, and adding Nature as a potential opponent, generates a 3 (type of agent) X 4 (type of opponent) matrix of social situations. This framework is used to review the experimental decision-making literature and point out the gaps that still exist in it.},
url = {/files/dp443.pdf},
author = {Bornstein, Gary}
}
@booklet {feldman-caocist2007,
title = {Computing an Optimal Contract in Simple Technologies},
journal = {Discussion Papers},
number = {452},
year = {2007},
month = {5},
abstract = {We study an economic setting in which a principal motivates a team of strategic agents to exert costly effort toward the success of a joint project. The action taken by each agent is hidden and affects the (binary) outcome of the agent{\textquoteright}s individual task stochastically. A Boolean function, called technology, maps the individual tasks{\textquoteright} outcomes into the outcome of the whole project. The principal induces a Nash equilibrium on the agents{\textquoteright} actions through payments that are conditioned on the project{\textquoteright}s outcome (rather than the agents{\textquoteright} actual actions) and the main challenge is that of determining the Nash equilibrium that maximizes the principal{\textquoteright}s net utility, referred to as the optimal contract. Babaioff, Feldman and Nisan [1] suggest and study a basic combinatorial agency model for this setting. Here, we concentrate mainly on two extreme cases: the AND and OR technologies. Our analysis of the OR technology resolves an open question and disproves a conjecture raised in [1]. In particular, we show that while the AND case admits a polynomial-time algorithm, computing the optimal contract in the OR case is NP-hard. On the positive side, we devise an FPTAS for the OR case, which also sheds some light on optimal contract approximation of general technologies.},
url = {/files/dp452.pdf},
author = {Yuval Emek and Michal Feldman}
}
@booklet {ullmann-margalit-dctaonta2007,
title = {Difficult Choices: To Agonize or Not to Agonize?},
journal = {Discussion Papers},
number = {450},
year = {2007},
month = {3},
publisher = {Social Research, 74 (2007), 51-74},
abstract = {What makes a choice difficult, beyond being complex or difficult to calculate? Characterizing difficult choices as posing a special challenge to the agent, and as typically involving consequences of significant moment as well as clashes of values, the article proceeds to compare the way difficult choices are handled by rational choice theory and by the theory that preceded it, Kurt Lewin{\textquoteright}s "conflict theory." The argument is put forward that within rational choice theory no choice is in principle difficult: if the object is to maximize some value, the difficulty can be at most calculative. Several prototypes of choices that challenge this argument are surveyed and discussed (picking, multidimensionality, "big decisions" and dilemmas); special attention is given to difficult choices faced by doctors and layers. The last section discusses a number of devices people employ in their attempt to cope with difficult choices: escape, "reduction" to non-difficult choices, and second-order strategies.},
url = {/files/dp450.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {kareev-doriace2007,
title = {Distribution of Resources in a Competitive Environment},
journal = {Discussion Papers},
number = {465},
year = {2007},
month = {10},
abstract = {When two agents of unequal strength compete, the stronger one is expected to always win the competition. This expectation is based on the assumption that evaluation of performance is flawless. If, however, the agents are evaluated on the basis of only a small sample of their performance, the weaker agent still stands a chance of winning occasionally. A theoretical analysis indicates that for this to happen, the weaker agent must introduce variability into the effort he or she invests in the behavior, such that on some occasions the weaker agent{\textquoteright}s level of performance is as high as that of the stronger agent, whereas on others it is lower. This, in turn, would drive the stronger agent to introduce variability into his or her behavior. We model this situation in a game, present its game-theoretic solution, and report an experiment, involving 144 individuals, in which we tested whether players are actually sensitive to their relative strengths and know how to allocate their resources given those relative strengths. Our results indicate that they do.},
url = {/files/dp465.pdf},
author = {Judith Avrahami and Yaakov Kareev}
}
@booklet {serrano-aeior2007,
title = {An Economic Index of Riskiness},
journal = {Discussion Papers},
number = {446},
year = {2007},
month = {2},
publisher = {Journal of Political Economy 116 (2008), 810-836},
abstract = {Define the riskiness of a gamble as the reciprocal of the absolute risk aversion (ARA) of an individual with constant ARA who is indifferent between taking and not taking that gamble. We characterize this index by axioms, chief among them a "duality" axiom which, roughly speaking, asserts that less risk-averse individuals accept riskier gambles. The index is homogeneous of degree 1, monotonic with respect to first and second order stochastic dominance, and for gambles with normal distributions, is half of variance/mean. Examples are calculated, additional properties derived, and the index is compared with others in the literature.},
url = {/files/Economic\%20Index\%20of\%20Riskiness.pdf},
author = {Robert J. Aumann and Roberto Serrano}
}
@booklet {sergiuhart-essorgatvorp2007,
title = {Evolutionarily Stable Strategies of Random Games, and the Vertices of Random Polygons},
journal = {Discussion Papers},
number = {445},
year = {2007},
month = {1},
publisher = {Annals of Applied Probability 18 (2008), 1, 259-287},
abstract = {An evolutionarily stable strategy (ESS) is an equilibrium strategy that is immune to invasions by rare alternative ("mutant") strategies. Unlike Nash equilibria, ESS do not always exist in finite games. In this paper, we address the question of what happens when the size of the game increases: does an ESS exist for "almost every large" game? Letting the entries in the n x n game matrix be randomly chosen according to an underlying distribution F, we study the number of ESS with support of size 2. In particular, we show that, as n goes to infinity, the probability of having such an ESS: (i) converges to 1 for distributions F with "exponential and faster decreasing tails" (e.g., uniform, normal, exponential); and (ii) it converges to 1 - 1/sqrt(e) for distributions F with "slower than exponential decreasing tails" (e.g., lognormal, Pareto, Cauchy). Our results also imply that the expected number of vertices of the convex hull of n random points in the plane converges to infinity for the distributions in (i), and to 4 for the distributions in (ii).},
url = {/files/ess.html},
author = {Sergiu Hart and Benjamin Weiss}
}
@booklet {yaakovkareev-epaatuoc2007,
title = {Expected Prediction Accuracy and the Usefulness of Contingencies},
journal = {Discussion Papers},
number = {455},
year = {2007},
month = {7},
abstract = {Regularities in the environment are used to decide what course of action to take and how to prepare for future events. Here we focus on the utilization of regularities for prediction and argue that the commonly considered measure of regularity - the strength of the contingency between antecedent and outcome events - does not fully capture the goodness of a regularity for predictions. We propose, instead, a new measure - the level of expected prediction accuracy (ExpPA) - which takes into account the fact that, at times, maximal prediction accuracy can be achieved by always predicting the same, most prevalent outcome, and in others, by predicting one outcome for one antecedent and another for the other. Two experiments, testing the ExpPA measure in explaining participants{\textquoteright} behavior, found that participants are sensitive to the twin facets of ExpPA and that prediction behavior is best explained by this new measure.},
url = {/files/dp455.pdf},
author = {Yaakov Kareev and Klaus Fiedler and Judith Avrahami}
}
@booklet {hart-fqogt2007,
title = {Five Questions on Game Theory},
journal = {Discussion Papers},
number = {453},
year = {2007},
month = {5},
publisher = {Game Theory, Vincent F. Hendricks and Pelle Guldborg Hansen (eds.), Automatic Press / VIP (2007), 97-107},
url = {/files/5\%20questions},
author = {Sergiu Hart}
}
@booklet {guttel-troop2007,
title = {(Hidden) Risk of Opportunistic Precautions, The},
journal = {Discussion Papers},
number = {471},
year = {2007},
month = {12},
publisher = {Virginia Law Review 93 (2007), 1389-1435},
abstract = {Under the conventional tort law paradigm, a tortfeasor behaves unreasonably when two conditions are met: the tortfeasor could have averted the harm by investing in cost-effective precautions and failed to do so, and other, more cost-effective precautions were not available to the victim. Torts scholarship has long argued that making such a tortfeasor responsible for the ensuing harm induces optimal care. This Article shows that by applying the conventional analysis, courts create incentives for opportunistic investments in prevention. In order to shift liability to others, parties might deliberately invest in precautions even where such investments are inefficient. The Article presents two possible solutions to the problem. By instituting a combination of (1) broader restitution rules and (2) an extended risk-utility standard, legislators and judges can reform tort law to discourage opportunistic precautions and maximize social welfare.},
url = {/files/dp471.pdf},
author = {Guttel, Ehud}
}
@booklet {procaccia-ibme2007,
title = {Implementation by Mediated Equilibrium},
journal = {Discussion Papers},
number = {463},
year = {2007},
month = {9},
publisher = {International Journal of Game Theory 39 (2010), 191-207.},
abstract = {Implementation theory tackles the following problem: given a social choice correspondence, find a decentralized mechanism such that for every constellation of the individuals{\textquoteright} preferences, the set of outcomes in equilibrium is exactly the set of socially optimal alternatives (as specified by the correspondence). In this paper we are concerned with implementation by mediated equilibrium; under such an equilibrium, a mediator coordinates the players{\textquoteright} strategies in a way that discourages deviation. Our main result is a complete characterization of social choice correspondences which are implementable by mediated strong equilibrium. This characterization, in addition to being strikingly concise, implies that some important social choice correspondences which are not implementable by strong equilibrium are in fact implementable by mediated strong equilibrium.},
url = {/files/dp463.pdf},
author = {Bezalel Peleg and Ariel D. Procaccia}
}
@booklet {nirhalevy-lahamfipiicangp2007,
title = {{\textquotedblleft}Ingroup Love{\textquotedblright} and {\textquotedblleft}Outgroup Hate{\textquotedblright} as Motives for Individual Participation in Intergroup Conflict: A New Game Paradigm},
journal = {Discussion Papers},
number = {474},
year = {2007},
month = {12},
publisher = {Psychological Science (forthcoming)},
abstract = {What motivates individual self-sacrificial behavior in intergroup conflicts? Is it the altruistic desire to help the ingroup or the aggressive drive to hurt the outgroup? This paper introduces a new game paradigm, the Intergroup Prisoner{\textquoteright}s Dilemma{\textendash}Maximizing Difference (IPD-MD) game, designed specifically to distinguish between these two motives. The game involves two groups. Each group member is given a monetary endowment and can decide how much of it to contribute. Contribution can be made to either of two pools, one which benefits the ingroup at a personal cost, and another which, in addition, harms the outgroup. An experiment demonstrated that contributions in the IPD-MD game are made almost exclusively to the cooperative within-group pool. Moreover, pre-play intragroup communication increases intragroup cooperation but not intergroup competition. These results are compared with those observed in the Intergroup Prisoner{\textquoteright}s Dilemma (IPD) game, where group members{\textquoteright} contributions are restricted to the competitive between-group pool.},
url = {/files/dp474.pdf},
author = {Nir Halevy and Gary Bornstein and Lilach Sagiv}
}
@booklet {medina-lcmvgtatdeocs2007,
title = {Less Crime, More (Vulnerable) Victims: Game Theory and the Distributional Effects of Criminal Sanctions},
journal = {Discussion Papers},
number = {472},
year = {2007},
month = {12},
publisher = {Review of Law \& Economics 3 (2007), 407-435},
abstract = {Harsh sanctions are conventionally assumed to primarily benefit vulnerable targets. Contrary to this perception, this article shows that augmented sanctions often serve the less vulnerable targets. While decreasing crime, harsher sanctions also induce the police to shift enforcement efforts from more to less vulnerable victims. When this shift is substantial, augmented sanctions exacerbate{\textendash}rather than reduce{\textendash}the risk to vulnerable victims. Based on this insight, this article suggests several normative implications concerning the efficacy of enhanced sanctions, the importance of victims{\textquoteright} funds,and the connection between police operations and apprehension rates.},
url = {/files/dp472.pdf},
author = {Ehud Guttel and Barak Medina}
}
@booklet {procaccia-metv2007,
title = {Mediators Enable Truthful Voting},
journal = {Discussion Papers},
number = {451},
year = {2007},
month = {4},
abstract = {The Gibbard-Satterthwaite Theorem asserts the impossibility of designing a non-dictatorial voting rule in which truth-telling always constitutes a Nash equilibrium. We show that in voting games of complete information where a mediator is on hand, this troubling impossibility result can be alleviated. Indeed, we characterize families of voting rules where, given a mediator, truthful preference revelation is always in strong equilibrium. In particular, we observe that the family of feasible elimination procedures has the foregoing property.},
url = {/files/dp451.pdf},
author = {Bezalel Peleg and Ariel D. Procaccia}
}
@booklet {hart-aomor2007,
title = {An Operational Measure of Riskiness},
journal = {Discussion Papers},
number = {454},
year = {2007},
month = {6},
publisher = {Journal of Political Economy 117 (2009), 5, 785-814},
abstract = {We define the riskiness of a gamble g as that unique number R(g) such that no-bankruptcy is guaranteed if and only if one never accepts gambles whose riskiness exceeds the current wealth.},
url = {/files/risk.html},
author = {Dean P. Foster and Sergiu Hart}
}
@booklet {mayabar-hillel-pwcrdgsmlwtpo2007,
title = {Predicting World Cup Results: Do Goals Seem More Likely When They Pay Off?},
journal = {Discussion Papers},
number = {448},
year = {2007},
month = {3},
publisher = {Psychonomic Bulletin and Review 15 (2008), 278-283},
abstract = {In a series of experiments, Bar-Hillel and Budescu (1995) failed to find a desirability bias in probability estimation. The World Cup soccer tournament (of 2002 and 2006) provided an opportunity to revisit the phenomenon, in a context where wishful thinking and desirability bias are notoriously rampant (e.g., Babad, 1991). Participants estimated the probabilities of various teams to win their upcoming games. They were promised money if one particular team, randomly designated by the experimenter, would win its upcoming game. Participants judged their target team more likely to win than other participants, whose promised monetary reward was contingent on the victory of its rival team. Prima facie this seems to be a desirability bias. However, in a follow-up study we made one team salient, without promising monetary rewards, by simply stating that it is "of special interest". Again participants judged their target team more likely to win than other participants, whose "team of special interest" was the rival team. Moreover, the magnitude of the two effects was very similar. On grounds of parsimony, we conclude that what seemed like a desirability bias may just be a salience/marking effect, and {\textendash} though optimism is a robust and ubiquitous human phenomenon {\textendash} wishful thinking still remains elusive.In 2008, a~shorter version of this paper was published under the title Wishful thinking in predicting world cup results as chapter 2 of Rationality and Social Responsibility (J. Krueger, ed.), 175-186.~ In the link todp448, it follows the version published in Psychonomic Bulletin and Review.},
url = {/files/dp448.pdf},
author = {Maya Bar-Hillel and David V. Budescu and Moty Amar}
}
@booklet {venezia-otpffpwdpbtmi2007,
title = {On the Preference for Full-Coverage Policies: Why Do People Buy Too Much Insurance?},
journal = {Discussion Papers},
number = {460},
year = {2007},
month = {8},
publisher = {Journal of Economic Psychology 29 (2008), 747-761},
abstract = {One of the most intriguing questions in insurance is the preference of consumers for low or zero deductible insurance policies. This stands in sharp contrast to a theorem proved by Mossin, 1968, that under quite common assumptions when the price of insurance is higher than its actuarial value, then full coverage is not optimal.We show in a series of experiments that amateur subjects tend to underestimate the value of a policy with a deductible and that the degree of underestimation increases with the size of the deductible. We hypothesize that this tendency is caused by the anchoring heuristic. In particular, in pricing a policy with a deductible subjects first consider the price of a full coverage policy. Then they anchor on the size of the deductible and subtract it from the price of the full coverage policy. However, they do not adjust the price enough upward to take into account the fact that there is only a small chance that the deductible will be applied toward their payments. We also show that professionals in the field of insurance are less prone to such a bias. This implies that a policy with a deductible priced according to the true expected payments may seem overpriced to the insured and therefore may not be purchased. Since the values of full coverage policies are not underestimated the insured may find them as relatively better deals .},
url = {/files/dp460.pdf},
author = {Zur Shapira and Itzhak Venezia}
}
@booklet {lehmann-apoqlboaatc2007,
title = {A Presentation of Quantum Logic Based on an and Then Connective},
journal = {Discussion Papers},
number = {442},
year = {2007},
month = {1},
abstract = {When a physicist performs a quantic measurement, new information about the system at hand is gathered. This paper studies the logical properties of how this new information is combined with previous information. It presents Quantum Logic as a propositional logic under two connectives: negation and the and then operation that combines old and new information. The and then connective is neither commutative nor associative. Many properties of this logic are exhibited, and some small elegant subset is shown to imply all the properties considered. No independence or completeness result is claimed. Classical physical systems are exactly characterized by the commutativity, the associativity, or the monotonicity of the and then connective. Entailment is defined in this logic and can be proved to be a partial order. In orthomodular lattices, the operation proposed by Finch in [3] satisfies all the properties studied in this paper. All properties satisfied by Finch{\textquoteright}s operation in modular lattices are valid in Quantum Logic. It is not known whether all properties of Quantum Logic are satisfied by Finch{\textquoteright}s operation in modular lattices.},
url = {/files/dp442.pdf},
author = {Daniel Lehmann}
}
@booklet {metge-ptdmipasfb2007,
title = {Protecting the Domestic Market: Industrial Policy and Strategic Firm Behaviour},
journal = {Discussion Papers},
number = {467},
year = {2007},
month = {10},
abstract = {Foreign firms to break into a new market commonly undercut domestic prices and, hence, subsidise the consumer{\textquoteright}s costs of switching in order to get a positive market share. However, this may constitute the act of dumping as drawn in Article VI of the General Agreement on Tariffs and Trade (GATT). Consequently, domestic firms trying to protect themselves against potential competitors often demand an anti-dumping (AD) investigation. In a two-period model of market entry with horizontally differentiated products and exogenous switching costs, it is demonstrated that the mere existence of switching costs and AD-rules may result in an anti-competition effect: the administratively set minimum-price rule protects the domestic firm and yields larger prices. Therefore, there are some consumers who will not buy either product in both periods although they would have done so in absence of AD. Consequently, competition policy should reassess the AD-regulation.},
url = {/files/dp467.pdf},
author = {Jens Metge}
}
@booklet {lehmann-qsatgochs2007,
title = {Quantic Superpositions and the Geometry of Complex Hilbert Spaces},
journal = {Discussion Papers},
number = {447},
year = {2007},
month = {2},
abstract = {The concept of a superposition is a revolutionary novelty introduced by Quantum Mechanics. If a system may be in any one of two pure states x and y, we must consider that it may also be in any one of many superpositions of x and y. This paper proposes an in-depth analysis of superpositions. It claims that superpositions must be considered when one cannot distinguish between possible paths, i.e., histories, leading to the current state of the system. In such a case the resulting state is some compound of the states that result from each of the possible paths. It claims that states can be compounded, i.e., superposed in such a way only if they are not orthogonal. Since different classical states are orthogonal, the claim implies no non-trivial superpositions can be observed in classical systems. It studies the parameters that define such compounds and finds two: a proportion defining the mix of the different states entering the compound and a phase difference describing the interference between the different paths. Both quantities are geometrical in nature: relating one-dimensional subspaces in complex Hilbert spaces. It proposes a formal definition of superpositions in geometrical terms. It studies the properties of superpositions.},
url = {/files/dp447.pdf},
author = {Daniel Lehmann}
}
@booklet {heifetz-re2007,
title = {Rationalizable Expectations},
journal = {Discussion Papers},
number = {461},
year = {2007},
month = {8},
abstract = {Consider an exchange economy with asymmetric information. What is the set of outcomes that are consistent with common knowledge of rationality and market clearing? We propose the concept of CKRMC as an answer to this question. The set of price functions that are CKRMC is the maximal set F with the property that every f in F defines prices that clear the markets for demands that can be rationalized by some profile of subjective beliefs on F. Thus, the difference between CKRMC and Rational Expectations Equilibrium (REE) is that CKRMC allows for a situation where the agents do not know the true price function and furthermore may have different beliefs about it. We characterize CKRMC, study its properties, and apply it to a general class of economies with two commodities. CKRMC manifests intuitive properties that stand in contrast to the full revelation property of REE. In particular, we obtain that for a broad class of economies: (1) There is a whole range of prices that are CKRMC in every state. (2) The set of CKRMC outcomes is monotonic with the amount of information in the economy.},
url = {/files/dp461.pdf},
author = {Elchanan Ben-Porath and Aviad Heifetz}
}
@booklet {weiss-oseapfdts2007,
title = {On Sequential Estimation and Prediction for Discrete Time Series},
journal = {Discussion Papers},
number = {464},
year = {2007},
month = {9},
abstract = {The problem of extracting as much information as possible from a sequence of observations of a stationary stochastic process X0,X1, {\textbrokenbar},Xn has been considered by many authors from different points of view. It has long been known through the work of D. Bailey that no universal estimator for P(Xn+1|X0,X1, ...Xn) can be found which converges to the true estimator almost surely. Despite this result, for restricted classes of processes, or for sequences of estimators along stopping times, universal estimators can be found. We present here a survey of some of the recent work that has been done along these lines.},
url = {/files/dp464.pdf},
author = {Gusztav Morvai and Benjamin Weiss}
}
@booklet {tamarkeasar-tsfoaedwsfsd2007,
title = {Signaling Function of an Extra-Floral Display: What Selects for Signal Development?, The},
journal = {Discussion Papers},
number = {468},
year = {2007},
month = {11},
abstract = {The vertical inflorescences of the Mediterranean annual Salvia viridis carry many small, colorful flowers, and are frequently terminated by a conspicuous tuft of colorful leaves ("flags") that attracts insect pollinators. Insects may use the flags as indicators of the food reward in the inflorescences, as long-distance cues for locating and choosing flowering patches, or both. Clipping of flags from patches of inflorescences in the field significantly reduced the number of pollinators that arrived at the patches, but not the total number of inflorescences and flowers visited by them. The number of flowers visited per inflorescence significantly increased with inflorescence size, however. Inflorescence and flower visit rates significantly increased with patch size when flags were present, but not after flag removal. 6\% of the plants in the study population did not develop any flag during blooming, yet suffered no reduction in seed set as compared to flag-bearing neighboring individuals. These results suggest that flags signal long-distance information to pollinators (perhaps indicating patch location or size), while flower-related cues may indicate inflorescence quality. Plants that do not develop flags probably benefit from the flag signals displayed by their neighbors, without bearing the costs of flag production. Thus, flag-producing plants can be viewed as altruists that enhance their neighbors{\textquoteright} fitness. Greenhouse-grown S. viridis plants allocated approximately 0.5\% of their biomass to flag production, and plants grown under water stress did not reduce their biomass allocation to flags as compared to irrigated controls. These findings suggest that the expenses of flag production are modest, perhaps reducing the cost of altruism. We discuss additional potential evolutionary mechanisms that may select for the maintenance of flag production.},
url = {/files/dp468.pdf},
author = {Tamar Keasar, Adi Sadeh and Avi Shmida}
}
@booklet {cleliadiserio-sspftcm2007,
title = {Simpson{\textquoteright}s Paradox for the Cox Model},
journal = {Discussion Papers},
number = {441},
year = {2007},
month = {1},
publisher = {Scandinavian Journal of Statistics 36, 463-480 (2009)},
abstract = {In the context of survival analysis, we define a covariate X as protective (detrimental) for the failure time T if the conditional distribution of [T | X = x] is stochastically increasing (decreasing) as a function of x. In the presence of another covariate Y, there exist situations where [T | X = x]},
url = {/files/dp441.pdf},
author = {Clelia Di Serio, Yosef Rinott and Marco Scarsini}
}
@booklet {rinott-osiusb2007,
title = {On Statistical Inference Under Selection Bias},
journal = {Discussion Papers},
number = {473},
year = {2007},
month = {12},
publisher = {The American Statistician 63 211-217 (2009)},
abstract = {This note revisits the problem of selection bias, using a simple binomial example. It focuses on selection that is introduced by observing the data and making decisions prior to formal statistical analysis. Decision rules and interpretation of confidence measure and results must then be taken relative to the point of view of the decision maker, i.e., before selection or after it. Such a distinction is important since inference can be considerably altered when the decision maker{\textquoteright}s point of view changes. This note demonstrates the issue, using both the frequentist and the Bayesian paradigms.},
url = {/files/dp473.pdf},
author = {Micha Mandel and Yosef Rinott}
}
@booklet {peretz-tsvor2007,
title = {Strategic Value of Recall, The},
journal = {Discussion Papers},
number = {470},
year = {2007},
month = {11},
abstract = {This work studies the value of two-person zero-sum repeated games in which at least one of the players is restricted to (mixtures of) bounded recall strategies. A (pure) k-recall strategy is a strategy that relies only on the last k periods of history. This work improves previous results [Lehrer, Neyman and Okada] on repeated games with bounded recall. We provide an explicit formula for the asymptotic value of the repeated game as a function of the stage game, the duration of the repeated game, and the recall of the agents.},
url = {/files/dp470.pdf},
author = {Ron Peretz}
}
@booklet {ben-porath-twhb2007,
title = {Trade with Heterogeneous Beliefs},
journal = {Discussion Papers},
number = {462},
year = {2007},
month = {8},
abstract = {The paper analyzes an economy with asymmetric information in which agents trade in contingent assets. The new feature in the model is that each agent may have any prior belief on the states of nature and thus the posterior belief of an agent may be any probability distribution that is consistent with his private information. We study two solution concepts: Equilibrium, which assumes rationality and market clearing, and common knowledge equilibrium (CKE) which makes the stronger assumption that rationality, market clearing, and the parameters which define the economy are common knowledge. The two main results characterize the set of equilibrium prices and the set of CKE prices in terms of parameters which specify for each state s and event E the amount of money in the hands of agents who know the event E at the state s. The characterizations that are obtained apply to a broad class of preferences which include all preferences that can be represented by the expectation of a state dependent monotone utility function. One implication of these results is a characterization of the information that is revealed in a CKE.},
url = {/files/dp462.pdf},
author = {Elchanan Ben-Porath}
}
@booklet {babichenko-uaapne2007,
title = {Uncoupled Automata and Pure Nash Equilibria},
journal = {Discussion Papers},
number = {459},
year = {2007},
month = {8},
abstract = {We study the problem of reaching Nash equilibria in multi-person games that are repeatedly played, under the assumption of uncoupledness: every player knows only his own payoff function. We consider strategies that can be implemented by finite-state automata, and characterize the minimal number of states needed in order to guarantee that a pure Nash equilibrium is reached in every game where such an equilibrium exists.},
url = {/files/dp459.pdf},
author = {Yakov Babichenko}
}
@booklet {tamarkeasar-vinpayatrtpviams2007,
title = {Variability in Nectar Production and Yield, and Their Relation to Pollinator Visits, in a Mediterranean Shrub},
journal = {Discussion Papers},
number = {458},
year = {2007},
month = {7},
publisher = {Arthropod Plant Interactions 2 (2008), 117-123},
abstract = {Nectar yields (standing crops) in flowers within an individual plant are often highly variable. This variability may be a by-product of the foraging activity of insect pollinators. Alternatively, plants may be selected to produce highly variable rewards to reduce consecutive visitation by risk-averse pollinators, thus diminishing within-plant pollen transfer. This study evaluated the roles of pollinator control vs. plant control over nectar variability in the bee-pollinated shrub Rosmarinus officinalis L. We sampled nectar production, yield and pollinator visits in three shrubs of one population over 17 days during one blooming season. Nectar production rates were highly variable (CV=1.48), and increased after rainy days. Nectar yields were even more variable (CV=2.16), and decreased with increasing temperatures. Pollinator visit rates decreased with variability in nectar yields, increased with flower number per shrub, and were unaffected by variability in nectar production rates. Repeated sampling of marked flowers revealed no correlation between their nectar yields and production rates. These findings support the role of reward variance in reducing pollinator visits, but suggest that plants are not in complete control of this variability. Rather, plant-generated variability can be modified by intensive foraging activity of pollinators. Such pollinator control over nectar variability is likely to reduce the selective advantage of plant-generated reward variation. Plant-controlled variability may provide evolutionary advantage when pollinator activity is insufficient to generate reward variation.},
url = {/files/dp458.pdf},
author = {Tamar Keasar, Adi Sadeh and Avi Shmida}
}
@booklet {zamir-aaasttguc2006,
title = {Asymmetric Auctions: Analytic Solutions to the General Uniform Case},
journal = {Discussion Papers},
number = {432},
year = {2006},
month = {9},
abstract = {While auction research, including asymmetric auctions, has grown significantly in recent years, there is still little analytical solutions of first-price auctions outside the symmetric case. Even in the uniform case, Griesmer et al. (1967) and Plum (1992) find solutions only to the case where the lower bounds of the two distributions are the same. We present the general analytical solutions to asymmetric auctions in the uniform case for two bidders, both with and without a minimum bid. We show that our solution is consistent with the previously known solutions of auctions with uniform distributions. Several interesting examples are presented including a class where the two bid functions are linear. We hope this result improves our understanding of auctions and provides a useful tool for future research in auctions.},
url = {/files/dp432.pdf},
author = {Todd R. Kaplan and Shmuel Zamir}
}
@booklet {zapechelnyuk-bwab2006,
title = {Bargaining with a Bureaucrat},
journal = {Discussion Papers},
number = {425},
year = {2006},
month = {6},
abstract = {We consider a bargaining problem where one of the players, the bureaucrat, has the power to dictate any outcome in a given set. The other players, the agents, negotiate with him which outcome to be dictated. In return, the agents transfer some part of their payoffs to the bureaucrat. We state five axioms and characterize the solutions which satisfy these axioms on a class of problems which includes as a subset all submodular bargaining problems. Every solution is characterized by a number $\alpha$ in the unit interval. Each agent in every bargaining problem obtains a weighted average of his individually rational level and his marginal contribution to the set of all players, where the weights are $\alpha$ and 1 - $\alpha$, respectively. The bureaucrat obtains the remaining surplus. The solution when $\alpha$ = 1/2 is the nucleolus of a naturally related game in characteristic form.},
url = {/files/dp425.pdf},
author = {Yair Tauman and Andriy Zapechelnyuk}
}
@booklet{yaniv-tboao2006,
  title     = {Benefit of Additional Opinions, The},
  journal   = {Discussion Papers},
  number    = {422},
  year      = {2006},
  month     = {5},
  publisher = {Current Directions in Psychological Science 13 (2004), 75-78},
  abstract  = {In daily decision making, people often solicit one another{\textquoteright}s opinions in the hope of improving their own judgment. According to both theory and empirical results, integrating even a few opinions is beneficial, with the accuracy gains diminishing as the bias of the judges or the correlation between their opinions increases. Decision makers using intuitive policies for integrating others{\textquoteright} opinions rely on a variety of accuracy cues in weighting the opinions they receive. They tend to discount dissenters and to give greater weight to their own opinion than to other people{\textquoteright}s opinions.},
  url       = {/files/dp422.pdf},
  author    = {Yaniv, Ilan}
}
@booklet {avrahami-cbaasuiolos2006,
title = {Choosing Between Adaptive Agents: Some Unexpected Implications of Level of Scrutiny},
journal = {Discussion Papers},
number = {436},
year = {2006},
month = {10},
abstract = {Even with ample time and data at their disposal, people often make do with small samples, which increases their risk of making the wrong decision. A theoretical analysis indicates, however, that when the decision involves selecting among competing, adaptive agents who are eager to be selected, an error-prone evaluation may be beneficial to the decision maker. In this case, the chance of an error can motivate competitors to exert greater effort, improving their level of performance which is the prime concern of the decision maker. This theoretical argument was tested empirically by comparing the effects of two levels of scrutiny of performance. Results show that minimal scrutiny can indeed lead to better performance than full scrutiny, and that the effect is conditional on a bridgeable difference between the competitors. We conclude by pointing out that error-prone decisions based on small samples may also maintain competition and diversity in the environment.},
url = {/files/dp436.pdf},
author = {Yaakov Kareev and Judith Avrahami}
}
@booklet {mansour-tccounep2006,
title = {Communication Complexity of Uncoupled Nash Equilibrium Procedures, The},
journal = {Discussion Papers},
number = {419},
year = {2006},
month = {4},
abstract = {We study the question of how long it takes players to reach a Nash equilibrium in "uncoupled" setups, where each player initially knows only his own payoff function. We derive lower bounds on the number of bits that need to be transmitted in order to reach a Nash equilibrium, and thus also on the required number of steps. Specifically, we show lower bounds that are exponential in the number of players in each one of the following cases: (1) reaching a pure Nash equilibrium; (2) reaching a pure Nash equilibrium in a Bayesian setting; and (3) reaching a mixed Nash equilibrium. Finally, we show that some very simple and naive procedures lead to similar exponential upper bounds.},
url = {/files/comcom.html},
author = {Sergiu Hart and Yishay Mansour}
}
@booklet {spencer-caep2006,
title = {Complexity and Effective Prediction},
journal = {Discussion Papers},
number = {435},
year = {2006},
month = {10},
abstract = {Let G = (I,J,g) be a two-person zero-sum game. We examine the two-person zero-sum repeated game G(k,m) in which player 1 and 2 place down finite state automata with k,m states respectively and the payoff is the average per stage payoff when the two automata face off. We are interested in the cases in which player 1 is "smart" in the sense that k is large but player 2 is "much smarter" in the sense that m>>k. Let S(g) be the value of G where the second player is clairvoyant, i.e., would know the player 1{\textquoteright}s move in advance. The threshold for clairvoyance is shown to occur for m near min(|I|, |J|)^k. For m of roughly that size, in the exponential scale, the value is close to S(g). For m significantly smaller (for some stage payoffs g) the value does not approach S(g).},
url = {/files/dp435.pdf},
author = {Abraham Neyman and Joel Spencer}
}
@booklet {ifatmaoz-dfasfcitic2006,
title = {Decision Framing and Support for Concessions in the Israeli-Palestinian Conflict},
journal = {Discussion Papers},
number = {423},
year = {2006},
month = {5},
publisher = {Journal of Peace Research (forthcoming)},
abstract = {The purpose of the study is to explore, in the context of the Israeli-Palestinian conflict, the influence of framing a decision task as inclusion or exclusion on Israeli-Jewish respondents{\textquoteright} support for the concession of Jewish settlements in the West Bank and Gaza. Respondents received a list of 40 Jewish settlements. Details such as the number of residents and geographical location were provided for each settlement. The respondents were randomly assigned to one of two conditions. In the inclusion condition 55 respondents were asked to mark the settlements for which they recommended that Israeli sovereignty be conceded. In the exclusion condition 53 respondents were asked to mark the settlements for which they recommended that Israeli sovereignty not be conceded. The findings confirm the predictions tested and indicate that: (1) Framing the task in terms of inclusion or exclusion affects respondents{\textquoteright} support for territorial compromise, so that respondents in the exclusion condition support the concession of more settlements than respondents in the inclusion condition. (2) Framing the task in terms of inclusion or exclusion has a greater effect on support for conceding options (settlements) that are perceived as ambiguous (less consensual in the climate of opinion) in comparison to options (settlements) that are perceived as more clear-cut (more consensual). The theoretical and practical implications of these findings are discussed.},
url = {/files/dp423.pdf},
author = {Ifat Maoz and Ilan Yaniv and Naama Ivri}
}
@booklet {gorodeisky-daobdftmpg2006,
title = {Deterministic Approximation of Best-Response Dynamics for the Matching Pennies Game [Revised]},
journal = {Discussion Papers},
number = {437},
year = {2006},
month = {11},
abstract = {We consider stochastic dynamics for the Matching Pennies game, which behave, in expectation, like the best-response dynamics (i.e., the continuous fictitious play). Since the corresponding vector field is not continuous, we cannot apply the deterministic approximation results of Bena{\"\i}m and Weibull [2003]. Nevertheless, we prove such results for our dynamics by developing the notion of a "leading coordinate."},
url = {/files/dp437.pdf},
author = {Ziv Gorodeisky}
}
@booklet{sheshinski-daiape2006,
  title    = {Differentiated Annuities in a Pooling Equilibrium},
  journal  = {Discussion Papers},
  number   = {433},
  year     = {2006},
  month    = {9},
  abstract = {Regular annuities provide payment for the duration of an owner{\textquoteright}s lifetime. Period-Certain annuities provide additional payment after death to a beneficiary provided the insured dies within a certain period after annuitization. It has been argued that the bequest option offered by the latter is dominated by life insurance which provides non-random bequests. This is correct if competitive annuity and life insurance markets have full information about individual longevities. In contrast, this paper shows that when individual longevities are private information, a competitive pooling equilibrium which offers annuities at common prices to all individuals may have positive amounts of both types of annuities in addition to life insurance. In this equilibrium, individuals self-select the types of annuities that they purchase according to their longevity prospects. The break-even price of each type of annuity reflects the average longevity of its buyers. The broad conclusion that emerges from this paper is that adverse-selection due to asymmetric information is reflected not only in the amounts of insurance purchased but, importantly, also in the choice of insurance products suitable for different individual characteristics. This conclusion is supported by recent empirical work about the UK annuity market (Finkelstein and Poterba (2004)).},
  url      = {/files/dp433.pdf},
  author   = {Eytan Sheshinski}
}
@booklet{hart-dcbaglg2006,
  title     = {Discrete Colonel Blotto and General Lotto Games},
  journal   = {Discussion Papers},
  number    = {434},
  year      = {2006},
  month     = {10},
  publisher = {International Journal of Game Theory 36 (2008), 3-4, 441-460},
  abstract  = {A class of integer-valued allocation games {\textendash} "General Lotto games" {\textendash} is introduced and solved. The results are then applied to analyze the classical discrete "Colonel Blotto games"; in particular, optimal strategies are obtained for all symmetric Colonel Blotto games.},
  url       = {http://www.ma.huji.ac.il/hart/abs/blotto.html},
  author    = {Sergiu Hart}
}
@booklet {weiss-eitofoi2006,
title = {Entropy Is the Only Finitely Observable Invariant},
journal = {Discussion Papers},
number = {420},
year = {2006},
month = {5},
abstract = {Our main purpose is to present a very surprising new characterization of the Shannon entropy of stationary ergodic processes. We will use two basic concepts: isomorphism of stationary processes and a notion of finite observability, and we will see how one is led, inevitably, to Shannon{\textquoteright}s entropy. A function J with values in some metric space, defined on all finite-valued, stationary, ergodic processes is said to be finitely observable (FO) if there is a sequence of functions Sn(x1,x2,...,xn) that for all processes~\S converges to J(\S) for almost every realization x1{\textasciicircum}\v z of \S. It is called an invariant if it returns the same value for isomorphic processes. We show that any finitely observable invariant is necessarily a continuous function of the entropy. Several extensions of this result will also be given.},
url = {/files/dp420.pdf},
author = {Donald Ornstein and Benjamin Weiss}
}
@booklet {ullmann-margalit-ff2006,
title = {Family Fairness},
journal = {Discussion Papers},
number = {427},
year = {2006},
month = {8},
publisher = {Social Research 73 (2006), 575-596},
abstract = {This paper is the last part of a three-part project. The larger picture is important for the proper framing of the present paper. Here then is an abstract of the three-part paper, which is about considerateness. Focusing on two extreme poles of the spectrum of human relationships, the paper argues that considerateness is the foundation upon which relationships are to be organized in both the thin anonymous context of the public space and the thick intimate context of the family. The first part of the paper introduces the notion of considerateness among strangers and explores the idea that considerateness is the minimum that we owe to one another in the public space. By acting considerately toward strangers {\textendash} for example, by holding a door open so it does not slam in the face of the next person who enters {\textendash} we show respect to that which we all share as people, namely, our common humanity. The second part explores the idea that considerateness is the foundation underlying the constitution of the exemplary family. I hypothesize that each family adopts its own particular distribution of domestic burdens and benefits and I refer to it as the family deal. The argument is that the considerate family deal embodies a notion of fairness that is a distinct, family-oriented notion of fairness. The third part of the larger paper which is the part I present here takes up the notion of family fairness and contrasts it with justice. In particular, I take issue with Susan Okin{\textquoteright}s notion of the just family and develop, instead, the notion of the not-unjust fair family. Driving a wedge between justice and fairness, I propose that family fairness is partial and sympathetic rather than impartial and empathic, and that it is particular and internal rather than universalizable. Furthermore, I claim that family fairness is based on ongoing comparisons of preferences among family members. I finally characterize the good family as a not-unjust family that is considerate and fair.},
url = {/files/dp427.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {iritnowik-gitnstgmp2006,
title = {Games in the Nervous System: The Game Motoneurons Play},
journal = {Discussion Papers},
number = {440},
year = {2006},
month = {12},
abstract = {Game theory is usually applied to biology through evolutionary games. However, many competitive processes in biology may be better understood by analyzing them on a shorter time-scale than the time-course considered in evolutionary dynamics. Instead of the change in the "fitness" of a player, which is the traditional payoff in evolutionary games, we define the payoff function, tailored to the specific questions addressed. In this work we analyze the developmental competition that arises between motoneurons innervating the same muscle. The "size principle" - a fundamental principle in the organization of the motor system, stating that motoneurons with successively higher activation-threshold innervate successively larger portions of the muscle - emerges as a result of this competition. We define a game, in which motoneurons compete to innervate a maximal number of muscle-fibers. The strategies of the motoneurons are their activation-thresholds. By using a game theoretical approach we succeed to explain the emergence of the size principle and to reconcile seemingly contradictory experimental data on this issue. The evolutionary advantage of properties as the size principle, emerging as a consequence of competition rather than being genetically hardwired, is that it endows the system with adaptation capabilities, such that the outcome may be fine-tuned to fit the environment. In accordance with this idea the present study provides several experimentally-testable predictions regarding the magnitude of the size principle in different muscles.},
url = {/files/dp440.pdf},
author = {Irit Nowik and Idan Segev and Shmuel Zamir}
}
@booklet {tamarkeasar-hosapatcofb2006,
title = {Honesty of Signaling and Pollinator Attraction: The Case of Flag-Like Bracts},
journal = {Discussion Papers},
number = {438},
year = {2006},
month = {12},
publisher = {Israel Journal of Plant Sciences 54 (2006), 119-128},
abstract = {Bracts are nonfloral showy structures associated with inflorescences. They are generally hypothesized to enhance plant reproductive success by attracting pollinating insects. We investigated whether flag-like bracts at the top of inflorescences reliably signal floral food reward for pollinators in Salvia viridis L. Field and greenhouse data indicate incomplete synchrony between the development of flowers and bracts. Various measures of bract size, however, positively correlate with the number of open flowers on the inflorescence, and with their nectar rewards. Experimental removal of bracts from inflorescences significantly reduced honeybee visitation in the field. We compared these findings with field data on Lavandula stoechas L., another labiate species with flag-like displays. The number of open flowers in L. stoechas cannot be reliably predicted from the presence or size of the bracts. Bract clipping does not significantly reduce honeybee visits in this species. We conjecture that bees learn to orient to those bracts that reliably signal food rewards, and disregard bracts if they provide unreliable signals. Asynchronous development of bracts and floral rewards can reduce the reliability of the signals, and may explain the rarity of flag-like displays in pollination systems. We discuss additional selective forces that may favor bract displays.},
url = {/files/dp438.pdf},
author = {Tamar Keasar and Gad Pollak and Rachel Arnon and Dan Cohen and Avi Shmida}
}
@booklet {peleg-lcosg2006,
title = {Lexicographic Composition of Simple Games},
journal = {Discussion Papers},
number = {415},
year = {2006},
month = {2},
publisher = {Games and Economic Behavior 62 (2008), 628-642},
abstract = {A two-house legislature can often be modelled as a proper simple game whose outcome depends on whether a coalition wins, blocks or loses in two smaller proper simple games. It is shown that there are exactly five ways to combine the smaller games into a larger one. This paper focuses on one of the rules, lexicographic composition, where a coalition wins in G1{\textquoteright}{\textquoteright}{\textdaggerdbl}{\textquoteright}G2 when it either wins in G1, or blocks in G1 and wins in G2. It is the most decisive of the five. A lexicographically decomposable game is one that can be represented in this way using components whose player sets partition the whole set. Games with veto players are not decomposable, and anonymous games are decomposable if and only if they are decisive and have two or more players. If a player{\textquoteright}s benefit is assessed by any semi-value, then for two isomorphic games a player is better off from having a role in the first game than having the same role in the second. Lexicographic decomposability is sometimes compatible with equality of roles. A relaxation of it is suggested for its practical benefits.},
url = {/files/dp415.pdf},
author = {Barry O{\textquoteright}Neill and Bezalel Peleg}
}
@booklet {zapechelnyuk-omfaam2006,
title = {Optimal Mechanisms for an Auction Mediator},
journal = {Discussion Papers},
number = {424},
year = {2006},
month = {6},
abstract = {We consider a multi-period auction with a seller who has a single object for sale, a large population of potential buyers, and a mediator of the trade. The seller and every buyer have independent private values of the object. The mediator designs an auction mechanism which maximizes her revenue subject to certain constraints for the traders. In each period the seller auctions the object to a set of buyers drawn at random from the population. The seller can re-auction the object (infinitely many times) if it is not sold in previous interactions. We characterize the class of mediator-optimal auction mechanisms. One of such mechanisms is a Vickrey auction with a reserve price where the seller pays to the mediator a fixed percentage from the closing price.},
url = {/files/dp424.pdf},
author = {Alexander Matros and Andriy Zapechelnyuk}
}
@booklet{eden-otic2006,
  title    = {Optimal Ties in Contests},
  journal  = {Discussion Papers},
  number   = {430},
  year     = {2006},
  month    = {9},
  abstract = {I analyze a mechanism design of a tournament in which the principal can strategically enhance the probability of a tie. The principal decides on a "tie distance" and announces a rule according to which a tie is declared if the difference between the two contestants{\textquoteright} performances is within the tie distance. I show that the contestants{\textquoteright} equilibrium efforts do not depend on the prizes awarded in case of a tie. I find that there are cases in which the optimal mechanism has a positive tie distance.},
  url      = {/files/dp430.pdf},
  author   = {Eden, Maya}
}
@booklet {sheshinski-octipe2006,
title = {Optimum Commodity Taxation in Pooling Equilibria},
journal = {Discussion Papers},
number = {429},
year = {2006},
month = {9},
abstract = {This paper extends the standard model of optimum commodity taxation (Ramsey (1927) and Diamond-Mirrlees (1971)) to a competitive economy in which some markets are inefficient due to asymmetric information. As in most insurance markets, consumers impose varying costs on suppliers but firms cannot associate costs to customers and consequently all are charged equal prices. In a competitive pooling equilibrium, the price of each good is equal to average marginal costs weighted by equilibrium quantities. We derive modified Ramsey-Boiteux Conditions for optimum taxes in such an economy and show that they include general-equilibrium effects which reflect the initial deviations of producer prices from marginal costs, and the response of equilibrium prices to the taxes levied. It is shown that a condition on the monotonicity of demand elasticities enables to sign the deviations from the standard formula. The general analysis is applied to the optimum taxation of annuities and life insurance.},
url = {/files/dp429.pdf},
author = {Eytan Sheshinski}
}
@booklet {shapira-psabdd2006,
title = {Performance Sampling and Bimodal Duration Dependence},
journal = {Discussion Papers},
number = {431},
year = {2006},
month = {9},
publisher = {Journal of Mathematical Sociology 33 (2009), 1-27 (forthcoming)},
abstract = {Performance sampling models of duration dependence in employee turnover and firm exit predict that hazard rates will initially be low, gradually rise to a maximum, and then fall. As we note in this paper, however, several empirical duration distributions have bimodal hazard rates. This paper shows that such bimodal hazard rates can be derived from existing models of performance sampling by small changes in the assumptions. In particular, bimodal hazard rates emerge if the mean or the variance of performances changes over time, which would occur if employees or firms face more challenging tasks over time. Using data on turnover in law firms, we show that the hazard rate predicted by these models fit data better than existing models.},
url = {/files/dp431.pdf},
author = {Jerker Denrell and Zur Shapira}
}
@booklet {shapira-tpobtwaasijtotcodp2006,
title = {Perils of Betting to Win: Aspiration and Survival in Jeopardy! Tournament of the Champions (revision of Discussion Paper $\#$331), The},
journal = {Discussion Papers},
number = {417},
year = {2006},
month = {3},
abstract = {Behavior in competitive situations requires decision makers to evaluate their own as well as their competitors{\textquoteright} positions. Using data from a realistic competitive risk-taking setting, Jeopardy{\textquoteright}s Tournament of Champions (TOC), we test whether players choose the strategic best response when making their betting decisions. Analyses show that the percentage of players choosing the strategic best response is very low, a surprising finding because the TOC is contested by the best and most experienced players of the game. We conjecture that performance aspiration and survival targets that guide risk-taking behavior in competitive situations may lead players to select inferior competitive strategies.},
url = {/files/dp417.pdf},
author = {Elizabeth Boyle and Zur Shapira}
}
@booklet {russo-pgabd2006,
title = {Public Goods and Budget Deficit},
journal = {Discussion Papers},
number = {426},
year = {2006},
month = {7},
abstract = {We examine incentive-compatible mechanisms for fair financing and efficient selection of a public budget (or public good). A mechanism selects the level of the public budget and imposes taxes on individuals. Individuals{\textquoteright} preferences are quasilinear. Fairness is expressed as weak monotonicity (called scale monotonicity) of the tax imposed on an individual as a function of his benefit from an increased level of the public budget. Efficiency is expressed as selection of a Pareto-optimal level of the public budget. The budget deficit is the difference between the public budget and the total amount of taxes collected from the individuals. We show that any efficient scale-monotonic and incentive-compatible mechanism may generate a budget deficit. Moreover, it is impossible to collect taxes that always cover a fixed small fraction of the total cost.},
url = {/files/dp426.pdf},
author = {Abraham Neyman and Tim Russo}
}
@booklet {hart-ragaet2006,
title = {Robert Aumann{\textquoteright}s Game and Economic Theory},
journal = {Discussion Papers},
number = {416},
year = {2006},
month = {3},
publisher = {Scandinavian Journal of Economics 108 (2006), 185-211},
abstract = {An overview of the landmark contributions of Robert J. Aumann, winner of the 2005 Nobel Memorial Prize in Economic Sciences.},
url = {/files/aumann-n.html},
author = {Sergiu Hart}
}
@booklet {kalai-sbakaprorjasa2006,
title = {Science, Beliefs and Knowledge: A Personal Reflection on Robert J. Aumann{\textquoteright}s Approach},
journal = {Discussion Papers},
number = {418},
year = {2006},
month = {4},
abstract = {On the occasion of Robert J. Aumann{\textquoteright}s being awarded the 2005 Nobel Prize in Economics, this paper gives a personal view on some of Aumann{\textquoteright}s contributions, and primarily on his approach to foundational issues in game theory, economics, and science as a whole. It is based on numerous discussions and e-mail exchanges we had in the 1990{\textquoteright}s, dealing with various scientific and political matters, including our long debate on the {\textquoteleft}{\textquoteleft}Bible Code{\textquoteright}{\textquoteright} controversy.},
url = {/files/dp418.pdf},
author = {Gil Kalai}
}
@booklet {hart-sv2006,
title = {Shapley Value},
journal = {Discussion Papers},
number = {421},
year = {2006},
month = {5},
publisher = {The New Palgrave: A Dictionary of Economics, John Eatwell, Murray Milgate and Peter Newman (editors), Macmillan Press (1987), Vol. 4, 318-320 Game Theory, John Eatwell, Murray Milgate and Peter Newman (editors), Macmillan Press (1989), 210-216},
abstract = {The Shapley value is an a priori evaluation of the prospects of a player in a multi-person game. Introduced by Lloyd S. Shapley in 1953, it has become a central solution concept in cooperative game theory. The Shapley value has been applied to economic, political, and other models.},
url = {/files/val-palg2.html},
author = {Sergiu Hart}
}
@booklet {perry-twmr2006,
title = {Tournaments with Midterm Reviews},
journal = {Discussion Papers},
number = {414},
year = {2006},
month = {1},
abstract = {In many tournaments investments are made over time and conducting a review only once at the end, or also at points midway through, is a strategic decision of the tournament designer. If the latter is chosen, then a rule according to which the results of the different reviews are aggregated into a ranking must also be determined. This paper takes a first step in the direction of answering how such rules should be optimally designed. A characterization of the optimal aggregation rule is provided for a two-agent two-stage tournament. In particular, we show that treating the two reviews symmetrically may result in an equilibrium effort level that is inferior to the one in which only a final review is conducted. However, treating the two reviews lexicographically by first looking at the final review, and then using the midterm review only as a tie-breaking rule, strictly dominates the option of conducting a final review only. The optimal mechanism falls somewhere in between these two extreme mechanisms. It is shown that the more effective the first-stage effort is in determining the final review{\textquoteright}s outcome, the smaller is the weight that should be assigned to the midterm review in determining the agents{\textquoteright} ranking.},
url = {/files/dp414.pdf},
author = {Alex Gershkov and Motty Perry}
}
@booklet {rachelarnon-voaccacbbtl2006,
title = {Vertical Orientation and Color Contrast and Choices by Bumblebees (Bombus Terrestris L.)},
journal = {Discussion Papers},
number = {439},
year = {2006},
month = {12},
abstract = {The vertical inflorescences of several plant species are terminated by colorful bracts, which attract insect pollinators. The bracts contrast in color with the leaves below them, and are oriented perpendicular to the flowers on the inflorescence. We conducted laboratory experiments to determine the effects of color contrast and perpendicular orientation on the feeding choices of bumblebees. We first trained bees to feeders with color-contrasting perpendicular displays, composed of a horizontal and a vertical display component. We subsequently recorded the bees{\textquoteright} choices among feeders that displayed only one of these cues. The bees preferred perpendicular displays that resembled the training model in the color of the horizontal component. None of them chose a color-contrasting display that was not perpendicular. We then evaluated the effects of the horizontal vs. vertical components of perpendicular displays on the bees{\textquoteright} choices. After training bees to color-contrasting perpendicular displays, we allowed them to choose between displays that had either the same horizontal or the same vertical component as the training model. Foragers mostly oriented to the horizontal displays to which they had been trained. Our results suggest that (a) bumblebees can learn to associate three-dimensional perpendicular color-contrasting displays with food rewards; (b) these displays are processed hierarchically, with orientation dominating color contrast; (c) the horizontal component of perpendicular displays dominates the vertical component. We discuss possible implications of our findings for the evolution of flower signals based on extra-floral bracts.},
url = {/files/dp439.pdf},
author = {Rachel Arnon and Tamar Keasar and Dan Cohen and Avi Shmida}
}
@booklet{aumann-wap2006,
  title     = {War and Peace},
  journal   = {Discussion Papers},
  number    = {428},
  year      = {2006},
  month     = {8},
  publisher = {Les Prix Nobel 2005 (forthcoming)},
  abstract  = {Nobel Lecture.},
  url       = {/files/dp428.pdf},
  author    = {Robert J. Aumann}
}
@booklet {ronholzman-bsomvgodp2005,
title = {Bargaining Sets of Majority Voting Games (revision of Discussion Paper $\#$376)},
journal = {Discussion Papers},
number = {410},
year = {2005},
month = {11},
publisher = {Mathematics of Operations Research 32 (2007), 857-872},
abstract = {Let A be a finite set of m alternatives, let N be a finite set of n players and let RN be a profile of linear preference orderings on A of the players. Let uN be a profile of utility functions for RN. We define the NTU game VuN that corresponds to simple majority voting, and investigate its Aumann-Davis-Maschler and Mas-Colell bargaining sets. The first bargaining set is nonempty for m $\leq$ 3 and it may be empty for m $\geq$ 4. However, in a simple probabilistic model, for fixed m, the probability that the Aumann-Davis-Maschler bargaining set is nonempty tends to one if n tends to infinity. The Mas-Colell bargaining set is nonempty for m $\leq$ 5 and it may be empty for m $\geq$ 6. Furthermore, it may be empty even if we insist that n be odd, provided that m is sufficiently large. Nevertheless, we show that the Mas-Colell bargaining set of any simple majority voting game derived from the k-th replication of RN is nonempty, provided that k $\geq$ n + 2.},
url = {/files/dp410.pdf},
author = {Ron Holzman and Bezalel Peleg and Peter Sudh{\"o}lter}
}
@booklet {ullmann-margalit-bdocd2005,
title = {Big Decisions: Opting, Converting, Drifting},
journal = {Discussion Papers},
number = {409},
year = {2005},
month = {11},
publisher = {In Anthony O{\textquoteright}Hear (ed.), Political Philosophy, Cambridge: Cambridge University Press, 2006},
url = {/files/dp409.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {nisan-otcpoiaidq2005,
title = {On the Computational Power of Iterative Auctions I: Demand Queries},
journal = {Discussion Papers},
number = {381},
year = {2005},
month = {2},
abstract = {We study the computational power and limitations of iterative combinatorial auctions. Most existing iterative combinatorial auctions are based on repeatedly suggesting prices for bundles of items, and querying the bidders for their "demand" under these prices. We prove several results regarding such auctions that use a polynomial number of demand queries: (1) that such auctions can simulate several other natural types of queries; (2) that such auctions can solve linear programming relaxations of winner determination problems; (3) that they can approximate the optimal allocation as well as generally possible using polynomial communication or computation, while weaker types of queries can not do so. We also initiate the study of how the prices of bundles can be represented when they are not linear, and show that the "default" representation has severe limitations.},
url = {/files/dp381.pdf},
author = {Liad Blumrosen and Noam Nisan}
}
@booklet {nisan-otcpoiaiaa2005,
title = {On the Computational Power of Iterative Auctions II: Ascending Auctions},
journal = {Discussion Papers},
number = {382},
year = {2005},
month = {2},
abstract = {We embark on a systematic analysis of the power and limitations of iterative ascending-price combinatorial auctions. We prove a large number of results showing the boundaries of what can be achieved by different types of ascending auctions: item prices vs. bundle prices, anonymous prices vs. personalized prices, deterministic vs. non-deterministic, ascending vs. descending, preference elicitation vs. full elicitation, adaptive vs. non-adaptive, and single trajectory vs. multi trajectory. Two of our main results show that neither ascending item-price auctions nor ascending anonymous bundle-price auctions can determine the optimal allocation among general valuations. This justifies the use of personalized bundle prices in iterative combinatorial auctions like the FCC spectrum auctions.},
url = {/files/dp382.pdf},
author = {Liad Blumrosen and Noam Nisan}
}
@booklet {robertjaumann-catsp2005,
title = {Conditioning and the Sure-Thing Principle},
journal = {Discussion Papers},
number = {393},
year = {2005},
month = {6},
abstract = {This paper undertakes a careful examination of the concept of conditional probability and its use. The ideas are then applied to resolve a conceptual puzzle related to Savage{\textquoteright}s "Sure-Thing Principle."},
url = {/files/dp393.pdf},
author = {Robert J. Aumann and Sergiu Hart and Motty Perry}
}
@booklet {aumann-c2005,
title = {Consciousness},
journal = {Discussion Papers},
number = {391},
year = {2005},
month = {5},
publisher = {In Life as We Know It, Edited by J. Seckbach, Springer (2006), 555-564},
abstract = {Consciousness is the last great frontier of science. Here we discuss what it is, how it differs fundamentally from other scientific phenomena, what adaptive function it serves, and the difficulties in trying to explain how it works. The emphasis is on the adaptive function.},
url = {/files/consciousness.pdf},
author = {R. J. Aumann}
}
@booklet {thomaskittsteiner-dvisa2005,
title = {Declining Valuations in Sequential Auctions},
journal = {Discussion Papers},
number = {385},
year = {2005},
month = {2},
abstract = {We analyze an independent private values model where a number of objects are sold in sequential first- and second-price auctions. Bidders have unit demand and their valuation for an object is decreasing in the rank number of the auction in which it is sold. We derive efficient equilibria if prices are announced after each auction or if no information is given to bidders. We show that the sequence of prices constitutes a supermartingale. Even if we correct for the decrease in valuations for objects sold in later auctions we find that average prices are declining.},
url = {/files/dp385.pdf},
author = {Thomas Kittsteiner and J{\"o}rg Nikutta and Eyal Winter}
}
@booklet {klement-teoswmsmdl2005,
title = {Economics of Shame: Why More Shaming May Deter Less, The},
journal = {Discussion Papers},
number = {401},
year = {2005},
month = {8},
abstract = {This paper investigates the effectiveness of shaming penalties. It establishes that there may be an inverse relation between the rate of shaming penalties and their deterrent effects - the more people are shamed the less deterring shaming penalties become. This conclusion is based on a search model in which the costs of searching for law-abiding partners increase with the rate of shaming, and may lead to lower expected sanction for offenders. The inverse relation between the rate of shaming penalties and their effectiveness is later used to show that increasing the probability of detection, increasing the magnitude of shaming penalties or reducing the number of wrongful acquittals does not necessarily increase the deterrent effects of shaming penalties (and may, in fact, decrease these effects).},
url = {/files/dp401.pdf},
author = {Alon Harel and Alon Klement}
}
@booklet {maschler-eacf2005,
title = {Encouraging a Coalition Formation},
journal = {Discussion Papers},
number = {392},
year = {2005},
month = {5},
publisher = {Theory and Decision 56 (2004), 25-34},
abstract = {A 4-person quota game is analyzed and discussed, in which players find it beneficial to pay others, in order to encourage favorable coalition structure.},
url = {/files/dp392.pdf},
author = {Michael Maschler}
}
@booklet {wexler-edflpigwmbie2005,
title = {Evolutionary Dynamics for Large Populations in Games with Multiple Backward Induction Equilibria},
journal = {Discussion Papers},
number = {402},
year = {2005},
month = {9},
abstract = {This work follows "Evolutionary dynamics and backward induction" (Hart [2000]) in the study of dynamic models consisting of selection and mutation, when the mutation rate is low and the populations are large. Under the assumption that there is a single backward induction (or subgame perfect) equilibrium of a perfect information game, Hart [2000] proved that this point is the only stable state. In this work, we examine the case where there are multiple backward induction equilibria.},
url = {/files/dp402.pdf},
author = {Tomer Wexler}
}
@booklet {neyman-eoosimgwii2005,
title = {Existence of Optimal Strategies in Markov Games with Incomplete Information},
journal = {Discussion Papers},
number = {413},
year = {2005},
month = {12},
abstract = {The existence of a value and optimal strategies is proved for the class of two-person repeated games where the state follows a Markov chain independently of players{\textquoteright} actions and at the beginning of each stage only player one is informed about the state. The results apply to the case of standard signaling where players{\textquoteright} stage actions are observable, as well as to the model with general signals provided that player one has a nonrevealing repeated game strategy. The proofs reduce the analysis of these repeated games to that of classical repeated games with incomplete information on one side.},
url = {/files/dp413.pdf},
author = {Abraham Neyman}
}
@booklet {okada-gosseanbr2005,
title = {Growth of Strategy Sets, Entropy, and Nonstationary Bounded Recall},
journal = {Discussion Papers},
number = {411},
year = {2005},
month = {11},
abstract = {One way to express bounded rationality of a player in a game theoretic model is by specifying a set of feasible strategies for that player. In dynamic game models with finite automata and bounded recall strategies, for example, feasibility of strategies is determined via certain complexity measures: the number of states of automata and the length of recall. Typically in these models, a fixed finite bound on the complexity is imposed resulting in finite sets of feasible strategies. As a consequence, the number of distinct feasible strategies in any subgame is finite. Also, the number of distinct strategies induced in the first T stages is bounded by a constant that is independent of T. In this paper, we initiate an investigation into a notion of feasibility that reflects varying degree of bounded rationality over time. Such concept must entail properties of a strategy, or a set of strategies, that depend on time. Specifically, we associate to each subset $\Psi_i$ of the full (theoretically possible) strategy set a function $\psi_i$ from the set of positive integers to itself. The value $\psi_i(t)$ represents the number of strategies in $\Psi_i$ that are distinguishable in the first t stages. The set $\Psi_i$ may contain infinitely many strategies, but it can differ from the fully rational case in the way $\psi_i$ grows reflecting a broad implication of bounded rationality that may be alleviated, or intensified, over time. We examine how the growth rate of $\psi_i$ affects equilibrium outcomes of repeated games. In particular, we derive an upper bound on the individually rational payoff of repeated games where player 1, with a feasible strategy set $\Psi_1$, plays against a fully rational player 2. We will show that the derived bound is tight in that a specific, and simple, set $\Psi_1$ exists that achieves the upper bound. 
As a special case, we study repeated games with non-stationary bounded recall strategies where the length of recall is allowed to vary in the course of the game. We will show that a player with bounded recall can guarantee the minimax payoff of the stage game even against a player with full recall so long as he can remember, at stage t, at least K log(t) stages back for some constant K >0. Thus, in order to guarantee the minimax payoff, it suffices to remember only a vanishing fraction of the past. A version of the folk theorem is provided for this class of games.},
url = {/files/dp411.pdf},
author = {Abraham Neyman and Daijiro Okada}
}
@booklet {feldman-iwabas2005,
title = {Implementation with a Bounded Action Space},
journal = {Discussion Papers},
number = {412},
year = {2005},
month = {12},
abstract = {While traditional mechanism design typically assumes isomorphism between the agents{\textquoteright} type- and action spaces, in many situations the agents face strict restrictions on their action space due to, e.g., technical, behavioral or regulatory reasons. We devise a general framework for the study of mechanism design in single-parameter environments with restricted action spaces. Our contribution is threefold. First, we characterize sufficient conditions under which the information-theoretically optimal social-choice rule can be implemented in dominant strategies, and prove that any multilinear social-choice rule is dominant-strategy implementable with no additional cost. Second, we identify necessary conditions for the optimality of action-bounded mechanisms, and fully characterize the optimal mechanisms and strategies in games with two players and two alternatives. Finally, we prove that for any multilinear social-choice rule, the optimal mechanism with k actions incurs an expected loss of $O(1/k^2)$ compared to the optimal mechanisms with unrestricted action spaces. Our results apply to various economic and computational settings, and we demonstrate their applicability to signaling games, public-good models and routing in networks.},
url = {/files/dp412.pdf},
author = {Liad Blumrosen and Michal Feldman}
}
@booklet {gavison-iosiaajdsh2005,
title = {Implications of Seeing Israel as a Jewish (and Democratic) State (in Hebrew)},
journal = {Discussion Papers},
number = {383},
year = {2005},
month = {2},
publisher = {Published as "Meaning and Implications of the Jewishness of Israel", in The Jewishness of Israel, Ravitzky and Stern (eds.), IDI (2007), 107-178},
abstract = {The paper starts from the fact that Israel is described as a {\textquoteright}Jewish and Democratic{\textquoteright} state. It opens with a rejection of some preliminary charges that Israel cannot be both Jewish and democratic or that maintaining its Jewish particularity is in principle unjustified. The main part of the paper analyzes various issues, such as the right to participate in elections, immigration, and education, to show what arrangements may be required by a wish to maintain the Jewishness of the state while, at the same time, respecting the human rights of all its residents.},
url = {/files/dp383.pdf},
author = {Ruth Gavison}
}
@booklet {hart-aiwra2005,
title = {An Interview with Robert Aumann},
journal = {Discussion Papers},
number = {386},
year = {2005},
month = {2},
publisher = {Macroeconomic Dynamics 9 (2005), 683-740. Also in Inside the Economist{\textquoteright}s Mind: The History of Modern Economic Thought, as Explained by Those Who Produced It, William A. Barnett and Paul Samuelson (eds.), Blackwell Publishing},
abstract = {Who is Robert Aumann? Is he an economist or a mathematician? A rational scientist or a deeply religious man? A deep thinker or an easygoing person? These seemingly disparate qualities can all be found in Aumann; all are essential facets of his personality. A pure mathematician who is a renowned economist, he has been a central figure in developing game theory and establishing its key role in modern economics. He has shaped the field through his fundamental and pioneering work, work that is conceptually profound, and much of it mathematically deep. He has greatly influenced and inspired many people: his students, collaborators, colleagues, and anyone who has been excited by reading his papers or listening to his talks. Aumann promotes a unified view of rational behavior, in many different disciplines: chiefly economics, but also political science, biology, computer science, and more. To mention just a few of the areas of Aumann{\textquoteright}s groundbreaking work: perfect competition, repeated games, correlated equilibrium, interactive knowledge and rationality, and coalitions and cooperation. But Aumann is not just a theoretical scholar, closed in his ivory tower. He is interested in real-life phenomena and issues, to which he applies insights from his research. He is a devoutly religious man; and he is one of the founding fathers-and a central and most active member-of the multidisciplinary Center for the Study of Rationality at the Hebrew University in Jerusalem. Aumann enjoys skiing, mountain climbing, and cooking-no less than working out a complex economic question or proving a deep theorem. He is a family man, a very warm and gracious person-of an extremely subtle and sharp mind. This interview catches a few glimpses of Robert Aumann{\textquoteright}s fascinating world. It was held in Jerusalem on three consecutive days in September of 2004. I hope the reader will learn from it and enjoy it as much as we two did.},
url = {/files/aumann.html},
author = {Sergiu Hart}
}
@booklet {gavison-ticplaajrd2005,
title = {Israeli Constitutional Process: Legislative Ambivalence and Judicial Resolute Drive, The},
journal = {Discussion Papers},
number = {380},
year = {2005},
month = {2},
publisher = {Published as "Legislatures and the Quest for a Constitution: The Case of Israel", Review of Constitutional Studies 11 (2006), 345-400},
abstract = {The paper analyses the development of the constitutional process in Israel since 1950, and especially since the 1992 basic laws. It argues that this process should be viewed within a framework distinguishing between three stages of constitution-making: the initial enactment of a constitution, amendments of the constitution, and application and interpretation of the constitution. The distinction between stages has institutional implications. Constitution-making should be primarily done by constituent assemblies. Regular legislatures are a second choice. The process should seek broad consensus, and involve big compromises between segments of the public. Amendments should also be undertaken by legislatures with broad consensus, but they can be more local, and their ratification procedures may be less demanding. Application and interpretation should be done in an ongoing way by all branches of government. Courts are authoritative interpreters but they do not necessarily have the final word on the constitution. When we study the Israeli process we see that it does not conform to this model at all. It reflects judicial involvement in all stages, including the initial making of the constitution. There is thus a {\textquoteright}legitimacy deficit{\textquoteright} in the constitutional process, which may perpetuate the current instability in the constitutional situation.},
url = {/files/dp380.pdf},
author = {Ruth Gavison}
}
@booklet {bengreiner-ltdtucaditde2005,
title = {Let the Dummy Talk! Unilateral Communication and Discrimination in Three-Person Dictator Experiments},
journal = {Discussion Papers},
number = {396},
year = {2005},
month = {8},
abstract = {To explain why pre-play communication increases cooperation in games, one refers to a) strategic causes such as efficient communication or reputation effects, and b) changes in the utilities due to social processes. Hitherto experimental support for both explanations is mixed and confounded. Our experimental design eliminates all strategic factors and allows to focus on the effects of communication processes. We clearly find social effects, but none of revealed anonymity or salient communication. The social processes invoked are very heterogeneous but not irregular for different communicators.},
url = {/files/dp396.pdf},
author = {Ben Greiner and Werner G{\"u}th and Ro{\textquoteright}i Zultan}
}
@booklet {sheshinski-laas2005,
title = {Longevity and Aggregate Savings},
journal = {Discussion Papers},
number = {403},
year = {2005},
month = {9},
abstract = {For the last fifty years, countries in Asia and elsewhere witnessed a surge in aggregate savings per capita. Many empirical studies attribute this trend to the highly significant increases in life longevity of the populations of these countries. Some argue that the rise in savings is short-run, to be eventually dissipated by the dissaving of the elderly, whose proportion in the population rises along with longevity. This paper examines whether these conclusions are supported by economic theory. A model of life cycle decisions with uncertain survival is used to derive individuals{\textquoteright} savings and chosen retirement age response to changes in longevity. Conditions on the age-profile of improvements in survival probabilities are shown to be necessary in order to predict the direction of this response (the uneven history of age specific improvements in longevity is recorded by Cutler (2004)). Population theory (e.g. Coale (1952)) is used to derive the dependence of the steady-state population age density on longevity. This, in turn, enables the explicit aggregation of individual response functions and a comparative steady-state analysis. Sufficient conditions for a sustainable positive effect of increased longevity on aggregate savings per capita are then derived. The importance of the availability of insurance markets is briefly discussed.},
url = {/files/dp403.pdf},
author = {Eytan Sheshinski}
}
@booklet {aumann-moiak2005,
title = {Musings on Information and Knowledge},
journal = {Discussion Papers},
number = {389},
year = {2005},
month = {3},
publisher = {Econ Journal Watch 2 (2005), 88-96},
abstract = {An invited contribution to a symposium on Information and Knowledge in Economics, to appear in Econ Journal Watch. Topics discussed include the distinction between information and knowledge; awareness; logical omniscience; the cost of calculation; semantic and syntactic models of knowledge, and the equivalence between them; and common knowledge of the model. Finally, some aspects of the symposium contributions of Ken Binmore, Jim Friedman, and Eric Rasmusen are discussed.},
url = {/files/dp389.pdf},
author = {Robert J. Aumann}
}
@booklet {peters-ncroeftlm2005,
title = {Nash Consistent Representation of Effectivity Functions Through Lottery Models},
journal = {Discussion Papers},
number = {404},
year = {2005},
month = {9},
publisher = {Games and Economic Behavior 65 (2009), 503-515.},
abstract = {Effectivity functions for finitely many players and alternatives are considered. It is shown that every monotonic and superadditive effectivity function can be augmented with equal chance lotteries to a finite lottery model - i.e., an effectivity function that preserves the original effectivity in terms of supports of lotteries - which has a Nash consistent representation. In other words, there exists a finite game form which represents the lottery model and which has a Nash equilibrium for any profile of utility functions, where lotteries are evaluated by their expected utility. No additional condition on the original effectivity function is needed.},
url = {/files/dp404.pdf},
author = {Bezalel Peleg and Hans Peters}
}
@booklet {gavison-tnsaiociasoc2005,
title = {Neve-Shalom/Wahat-Al-Salam School: An Island of Coexistence in a Sea of Conflict, The},
journal = {Discussion Papers},
number = {379},
year = {2005},
month = {2},
abstract = {These two papers (one in English and one in Hebrew) describe the unique educational experience of the Neve Shalom school, which is a fully integrated Jewish-Arab school within a system where Jews and Arabs regularly study in separate schools and languages. They are similar but have different emphases since the audiences are expected to be different in terms of background knowledge and familiarity with facts and situations. The paper studies the way the school handles issues of culture, language, religion and national identity. It then analyzes the school and makes suggestions concerning both the improvement of the effectiveness of the school itself and what can be learned from its experience to the educational system in Israel as a whole.},
url = {/files/dp379.pdf},
author = {Ruth Gavison}
}
@booklet {kalai-nsacisct2005,
title = {Noise Sensitivity and Chaos in Social Choice Theory},
journal = {Discussion Papers},
number = {399},
year = {2005},
month = {8},
abstract = {In this paper we study the social preferences obtained from monotone neutral social welfare functions for random individual preferences. It turns out that there are two extreme types of behavior. On one side, there are social welfare functions, such as the majority rule, that lead to stochastic stability of the outcome in terms of perturbations of individual preferences. We identify and study a class of social welfare functions that demonstrate an extremely different type of behavior which is completely chaotic: they lead to a uniform probability distribution on all possible social preference relations and, for every $\epsilon > 0$, if a small fraction $\epsilon$ of individuals change their preferences (randomly) the correlation between the resulting social preferences and the original ones tends to zero as the number of individuals in the society increases. This class includes natural multi-level majority rules.},
url = {/files/dp399.pdf},
author = {Gil Kalai}
}
@booklet {sagi-onwtkanwtiocrpgt2005,
title = {On Not Wanting to Know and Not Wanting to Inform Others: Choices Regarding Predictive Genetic Testing},
journal = {Discussion Papers},
number = {406},
year = {2005},
month = {9},
publisher = {Risk Decision and Policy 9 (2004), 317- 336},
abstract = {Recent advancement in genetics testing for late-onset diseases raises fundamental decision dilemmas. The first study surveyed people{\textquoteright}s willingness to undergo predictive testing to find out about their own predisposition for certain incurable, late-onset diseases. The second study investigated the respondents{\textquoteright} willingness to be tested as a function of the base rate of the disease, test diagnosticity, and the availability of treatment for the disease. In addition we surveyed (in the first study) people{\textquoteright}s willingness to disclose to others personal information about their genetic predisposition. The findings show that people often prefer not to know, as if they are choosing "protective ignorance". Respondents{\textquoteright} verbal justifications of their choices were also analyzed. Respondents offered emotional, cognitive-instrumental, and strategic reasons for their preferences. The findings are compared with other issues in behavioral decision theory, including attitudes towards uncertainty and desire for control. The implications of the findings for policies and legislation on genetic tests are also considered.},
url = {/files/dp406.pdf},
author = {Ilan Yaniv and Michal Sagi}
}
@booklet {yaniv-ropaiab2005,
title = {Receiving Other People{\textquoteright}s Advice: Influence and Benefit},
journal = {Discussion Papers},
number = {405},
year = {2005},
month = {9},
publisher = {Organizational Behavior and Human Decision Processes 93 (2004), 1-13},
abstract = {Seeking advice is a basic practice in making real life decisions. Until recently, however, little attention has been given to it in either empirical studies or theories of decision making. The studies reported here investigate the influence of advice on judgment and the consequences of advice use for judgment accuracy. Respondents were asked to provide final judgments on the basis of their initial opinions and advice presented to them. The respondents{\textquoteright} weighting policies were inferred. Analysis of these policies shows that (a) the respondents tended to place a higher weight on their own opinion than on the advisor{\textquoteright}s opinion (the self/other effect); (b) more knowledgeable individuals discounted the advice more; (c) the weight of advice decreased as its distance from the initial opinion increased; and (d) the use of advice improved accuracy significantly, though not optimally. A theoretical framework is introduced which draws in part on insights from the study of attitude change to explain the influence of advice. Finally the usefulness of advice for improving judgment accuracy is considered.},
url = {/files/dp405.pdf},
author = {Yaniv, Ilan}
}
@booklet {toxvaerd-rbatc2005,
title = {Record Breaking and Temporal Clustering},
journal = {Discussion Papers},
number = {395},
year = {2005},
month = {6},
abstract = {Casual observation suggests that athletics records tend to cluster over time. After prolonged periods without new records, a record breaking performance spurs other athletes to increase effort and thereby repeatedly set new standards. Subsequently, record breaking subsides and the pattern repeats itself. The clustering hypothesis is tested for the mile run, the marathon, the world hour record and long jump. For all four disciplines, the hypothesis of non-clustering is rejected at the 4\% level or below. A theoretical rationale for this phenomenon is provided through a model of social learning under limited awareness. The agents are assumed to be unaware of the true limits to performance and to take the current record as the upper bound. The observation of a record breaking achievement spurs the agents to try harder and thus temporarily increase the probability of new records. Subsequently, record breaking trails off and the process is repeated.},
url = {/files/dp395.pdf},
author = {Flavio Toxvaerd}
}
@booklet {abbamkrieger-ssraf2005,
title = {Select Sets: Rank and File},
journal = {Discussion Papers},
number = {388},
year = {2005},
month = {3},
publisher = {Annals of Applied Probability 17 (2007), 360-385},
abstract = {In many situations, the decision maker observes items in sequence and needs to determine whether or not to retain a particular item immediately after it is observed. Any decision rule creates a set of items that are selected. We consider situations where the available information is the rank of a present observation relative to its predecessors. Certain "natural" selection rules are investigated. Theoretical and Monte Carlo results are presented pertaining to the evolution of the number of items selected, measures of their quality and the time it would take to amass a group of a given size. A comparison between rules is made, and guidelines to the choice of good procedures are offered.},
url = {/files/dp388.pdf},
author = {Abba M. Krieger and Moshe Pollak and Ester Samuel-Cahn}
}
@booklet {gorodeisky-some2005,
title = {Stability of Mixed Equilibria},
journal = {Discussion Papers},
number = {397},
year = {2005},
month = {8},
abstract = {We consider stability properties of equilibria in stochastic evolutionary dynamics. In particular, we study the stability of mixed equilibria in strategic form games. In these games, when the populations are small, all strategies may be stable. We prove that when the populations are large, the unique stable outcome of best-reply dynamics in 2 x 2 games with a unique Nash equilibrium that is completely mixed is the mixed equilibrium. The proof of this result is based on estimating transition times in Markov chains.},
url = {/files/dp397.pdf},
author = {Ziv Gorodeisky}
}
@booklet {kalai-tonaqc2005,
title = {Thoughts on Noise and Quantum Computation},
journal = {Discussion Papers},
number = {400},
year = {2005},
month = {8},
abstract = {{\textquoteright}We will try to explore, primarily from the complexity-theoretic point of view, limitations of error-correction and fault-tolerant quantum computation. We consider stochastic models of quantum computation on n qubits subject to noise operators that are obtained as products of tiny noise operators acting on a small number of qubits. We conjecture that for realistic random noise operators of this kind there will be substantial dependencies between the noise on individual qubits and, in addition, the dependence structure of the noise acting on individual qubits will necessarily depend (systematically) on the dependence structure of the qubits themselves. The main hypothesis of this paper is that these properties of noise are sufficient to reduce quantum computation to probabilistic classical computation. Some potentially relevant mathematical issues and problems will be described. Our line of thought appears to be related to that of physicists Alicki, Horodecki, Horodecki and Horodecki [AHHH].{\textquoteright}},
url = {/files/dp400.pdf},
author = {Gil Kalai}
}
@booklet {safra-tpaiwspfmcsae2005,
title = {Threshold Phenomena and Influence, with Some Perspectives from Mathematics, Computer Science, and Economics},
journal = {Discussion Papers},
number = {398},
year = {2005},
month = {8},
url = {/files/dp398.pdf},
author = {Gil Kalai and Shmuel Safra}
}
@booklet {nitzan-tce2005,
title = {Tight Correlated Equilibrium},
journal = {Discussion Papers},
number = {394},
year = {2005},
month = {6},
abstract = {A correlated equilibrium of a strategic form n-person game is called tight if all the incentive constraints are satisfied as equalities. The game is called tight if all of its correlated equilibria are tight. This work shows that the set of tight games has positive measure.},
url = {/files/dp394.pdf},
author = {Noa Nitzan}
}
@booklet {shahardobzinski-trmfca2005,
title = {Truthful Randomized Mechanisms for Combinatorial Auctions},
journal = {Discussion Papers},
number = {408},
year = {2005},
month = {11},
abstract = {We design two computationally-efficient incentive-compatible mechanisms for combinatorial auctions with general bidder preferences. Both mechanisms are randomized, and are incentive-compatible in the universal sense. This is in contrast to recent previous work that only addresses the weaker notion of incentive compatibility in expectation. The first mechanism obtains an $O(\sqrt{m})$-approximation of the optimal social welfare for arbitrary bidder valuations {\textendash} this is the best approximation possible in polynomial time. The second one obtains an $O(\log^2 m)$-approximation for a subclass of bidder valuations that includes all submodular bidders. This improves over the best previously obtained incentive-compatible mechanism for this class which only provides an $O(\sqrt{m})$-approximation.},
url = {/files/dp408.pdf},
author = {Shahar Dobzinski and Noam Nisan and Michael Schapira}
}
@booklet {ein-ya-ugttismtlm2005,
title = {Using Game Theory to Increase Students{\textquoteright} Motivation to Learn Mathematics},
journal = {Discussion Papers},
number = {384},
year = {2005},
month = {2},
publisher = {Proceedings of the 4th Mediterranean Conference on Mathematics Education 2 (2005), 515-520},
abstract = {This paper reports an attempt to teach game theory in order to increase students{\textquoteright} motivation to learn mathematics. A course in game theory was created in order to introduce students to new mathematical content presented in a different way.},
url = {/files/dp384.pdf},
author = {Gura, Ein-Ya}
}
@booklet {dreze-waisadhsypawsye2005,
title = {When All Is Said and Done, How Should You Play and What Should You Expect?},
journal = {Discussion Papers},
number = {387},
year = {2005},
month = {3},
publisher = {Published as "Rational Expectations in Games," American Economic Review 98 (2008), 72-86},
abstract = {Modern game theory was born in 1928, when John von Neumann published his Minimax Theorem. This theorem ascribes to all two-person zero-sum games a value-what rational players may expect-and optimal strategies-how they should play to achieve that expectation. Seventy-seven years later, strategic game theory has not gotten beyond that initial point, insofar as the basic questions of value and optimal strategies are concerned. Equilibrium theories do not tell players how to play and what to expect; even when there is a unique Nash equilibrium, it is not at all clear that the players "should" play this equilibrium, nor that they should expect its payoff. Here, we return to square one: abandon all ideas of equilibrium and simply ask, how should rational players play, and what should they expect. We provide answers to both questions, for all n-person games in strategic form.},
url = {/files/dp387.pdf},
author = {Aumann, R. J. and Dreze, J. H.}
}
@booklet {samuel-cahn-wsysawdygssp2005,
title = {When Should You Stop and What Do You Get? Some Secretary Problems},
journal = {Discussion Papers},
number = {407},
year = {2005},
month = {10},
publisher = {Published as "Optimal Stopping for I.I.D. Random Variables", Sequential Analysis 26 (2007), 395-401},
abstract = {A version of a secretary problem is considered: Let Xj},
url = {/files/dp407.pdf},
author = {Ester Samuel-Cahn}
}
@booklet {flekser-wtebabwhtfemitp2005,
title = {With the Eye Being a Ball, What Happens to Fixational Eye Movements in the Periphery?},
journal = {Discussion Papers},
number = {390},
year = {2005},
month = {5},
abstract = {Although the fact that the eye is moving constantly has been known for a long time, the role of fixational eye movements (FEM) is still in dispute. Whatever their role, it is structurally clear that, since the eye is a ball, the size of these movements diminishes for locations closer to the poles. Here we propose a new perspective on the role of FEM from which we derive a prediction for a three-way interaction of a stimulus{\textquoteright} orientation, location, and spatial frequency. Measuring time-to-disappearance for gratings located in the periphery we find that, as predicted, gratings located to the left and right of fixation fade faster when horizontal than when vertical in low spatial frequencies and faster when vertical than when horizontal in high spatial frequencies. The opposite is true for gratings located above and below fixation.},
url = {/files/dp390.pdf},
author = {Avrahami, Judith and Flekser, Oren}
}
@booklet {hart-ah2004,
title = {Adaptive Heuristics},
journal = {Discussion Papers},
number = {372},
year = {2004},
month = {9},
publisher = {Econometrica 73 (2005), 1401-1430},
abstract = {We exhibit a large class of simple rules of behavior, which we call adaptive heuristics, and show that they generate rational behavior in the long run. These adaptive heuristics are based on natural regret measures, and may be viewed as a bridge between rational and behavioral viewpoints. The results presented here, taken together, establish a solid connection between the dynamic approach of adaptive heuristics and the static approach of correlated equilibria.},
url = {http://www.ma.huji.ac.il/hart/abs/heurist.html},
author = {Sergiu Hart}
}
@booklet {ilanguttman-atnatocem2004,
title = {Adding the Noise: A Theory of Compensation-Driven Earnings Management},
journal = {Discussion Papers},
number = {355},
year = {2004},
month = {3},
abstract = {Empirical evidence suggests that the distribution of earnings reports is discontinuous. This is puzzling since the distribution of true earnings is likely to be continuous. We present a model that rationalizes this phenomenon. In our model, managers report their earnings to rational investors, who price the stock accordingly. We assume that misreporting is costly, but since managers{\textquoteright} compensation is based on the stock price, they may want to manipulate the reported earnings. The model fits into the general framework of signaling games with a continuum of types. The conventional equilibrium in this game is fully revealing (e.g. Stein 1989), and does not explain the observed discontinuity of earnings reports. We show that a partially pooling equilibrium exists in such games as well, and it generates an endogenous discontinuity in reports. By pooling reports of different types, the informed manager introduces "home-made" noise into his report. The resulting vagueness enables the manager to reduce the manipulation costs. While a priori pooling looks manipulative, it is actually a way to reduce earnings management. The empirical implications of our model relate earnings management and price reaction to price- and earnings-based compensation, growth opportunities of the firm, underlying volatility, and the stringency of accounting rules. We show that this equilibrium arises due to stock-based compensation of the managers, and does not arise when they are paid based on their earnings directly. Finally, we present a general version of this model describing the behavior of biased experts in many real-life situations.},
url = {/files/dp355.pdf},
author = {Guttman, Ilan and Kadan, Ohad and Kandel, Eugene}
}
@booklet {rjaumann-aotcr2004,
title = {Analyses of the Gans Committee Report},
journal = {Discussion Papers},
number = {365},
year = {2004},
month = {7},
abstract = {This document contains four separate analyses, each with a different author, of the "Gans" committee report on the Bible codes (DP 364 of the Center for the Study of Rationality, June 2004). The analyses appear in alphabetical order of the authors{\textquoteright} names. Three of the authors were members of the committee; one, Doron Witztum, is active in Bible codes research. Two of the analyses-by Aumann and by Furstenberg-support the report of the committee; the other two-by Lapides and by Witztum-do not. This document contains material that was generated after the results of the committee{\textquoteright}s experiments became known; other than reporting the numerical results themselves, DP 364 contains only material generated before they became known.},
url = {/files/dp365.pdf},
author = {Aumann, R. J. and Furstenberg, H. and Lapides, I. and Witztum, D.}
}
@booklet {dreze-asr2004,
title = {Assessing Strategic Risk},
journal = {Discussion Papers},
number = {361},
year = {2004},
month = {5},
abstract = {In recent decades, the concept of subjective probability has been increasingly applied to an adversary{\textquoteright}s choices in strategic games. A careful examination reveals that the standard construction of subjective probabilities does not apply in this context. We show how the difficulty may be overcome by means of a different construction.},
url = {/files/dp361.pdf},
author = {Aumann, R. J. and Dreze, J. H.}
}
@booklet {rumafalk-asbfpoas2004,
title = {Average Speed Bumps: Four Perspectives on Averaging Speeds},
journal = {Discussion Papers},
number = {367},
year = {2004},
month = {7},
url = {/files/dp367.pdf},
author = {Falk, Ruma and Lann, Avital and Zamir, Shmuel}
}
@booklet {sudholter-bsovg2004,
title = {Bargaining Sets of Voting Games},
journal = {Discussion Papers},
number = {376},
year = {2004},
month = {12},
publisher = {(revised in dp $\#$410)},
abstract = {Let A be a finite set of m $\geq$ 3 alternatives, let N be a finite set of n $\geq$ 3 players and let Rn be a profile of linear preference orderings on A of the players. Throughout most of the paper the considered voting system is the majority rule. Let uN be a profile of utility functions for RN. Using $\alpha$-effectiveness we define the NTU game VuN and investigate its Aumann-Davis-Maschler and Mas-Colell bargaining sets. The first bargaining set is nonempty for m = 3 and it may be empty for m $\geq$ 4. Moreover, in a simple probabilistic model, for fixed m, the probability that the Aumann-Davis-Maschler bargaining set is nonempty tends to one if n tends to infinity. The Mas-Colell bargaining set is nonempty for m $\leq$ 5 and it may be empty for m $\geq$ 6. Moreover, we prove the following startling result: The Mas-Colell bargaining set of any simple majority voting game derived from the k-th replication of RN is nonempty, provided that k $\geq$ n + 2. We also compute the NTU games which are derived from choice by plurality voting and approval voting, and we analyze some interesting examples.},
url = {/files/dp376.pdf},
author = {Peleg, Bezalel and Sudholter, Peter}
}
@booklet {peleg-ber2004,
title = {Binary Effectivity Rules},
journal = {Discussion Papers},
number = {378},
year = {2004},
month = {12},
publisher = {Review of Economic Design 10 (2006), 167-181},
abstract = {A social choice rule is a collection of social choice correspondences, one for each agenda. An effectivity rule is a collection of effectivity functions, one for each agenda. We prove that every monotonic and superadditive effectivity rule is the effectivity rule of some social choice rule. A social choice rule is binary if it is rationalized by an acyclic binary relation. The foregoing result motivates our definition of a binary effectivity rule as the effectivity rule of some binary social choice rule. A binary social choice rule is regular if it satisfies unanimity, monotonicity, and independence of infeasible alternatives. A binary effectivity rule is regular if it is the effectivity rule of some regular binary social choice rule. We characterize completely the family of regular binary effectivity rules. Quite surprisingly, intrinsically defined von Neumann-Morgenstern solutions play an important role in this characterization.},
url = {/files/dp378.pdf},
author = {Keiding, Hans and Peleg, Bezalel}
}
@booklet {michaelgoldstein-bcaitp2004,
title = {Brokerage Commissions and Institutional Trading Patterns},
journal = {Discussion Papers},
number = {356},
year = {2004},
month = {3},
abstract = {Why do brokers charge per-share commissions to institutional traders? What determines the commission charge? We examine commissions and order flow for a sample of institutional orders and find that most per-share commissions are concentrated at only a few price points, primarily 5 and 6 cents per share. Further, we find that the prior-period commission, rather than execution costs, is the strongest determinant of next period{\textquoteright}s commission. These results are inconsistent with negotiation of commissions on an order-by-order basis or with the impression of a continuous transaction cost that is deduced from the distribution of percentage commissions, suggesting that commissions are not a marginal cost of execution. We also find that institutional clients concentrate their order flow with a small set of brokers, and that small institutions concentrate more than large institutions. Collectively, our results suggest that brokers and their institutional clients enter into long-term agreements where the per-share commission is constant, and the order flow routed to a particular broker is used to maintain the required payment for an institution{\textquoteright}s desired level of service. Commissions, therefore, constitute a convenient way of charging a predetermined fixed fee for broker services.},
url = {/files/dp356.pdf},
author = {Goldstein, Michael and Irvine, Paul and Kandel, Eugene and Wiener, Zvi}
}
@booklet {dgranot-cpgoacoeg2004,
title = {Chinese Postman Games on a Class of Eulerian Graphs},
journal = {Discussion Papers},
number = {366},
year = {2004},
month = {7},
abstract = {The extended Chinese postman (CP) enterprize is induced by a connected and undirected graph G. A server is located at some fixed vertex of G, to be referred to as the post office. Each player resides in a single edge, and each edge contains at most one player. Thus, some of the edges can be public . Each edge has a cost and a prize attached to it. The players need some service, e.g., mail delivery, which requires the server to travel from the post office and visit all edges wherein players reside, before returning to the post office. The server collects the prize attached to an edge upon the first traversal of this edge, but the cost of an edge is incurred every time it is traversed. The cost of a cheapest tour for each coalition defines a CP cost game. The issue is how to allocate, among the players, the cost that the server incurs. We study the class of extended CP enterprizes which are induced by Eulerian graphs satisfying two properties: The 4-cut property (Definition 4.4) and completeness (Definition 4.8). For this class we prove that the core, resp., the nucleolus when the core is not empty, are Cartesian products of the cores, resp., nucleoli of CP enterprizes whose graphs are simple cycles generated from G by identifying therein the end points of each elementary path (Definition 4.3). Finally, for the class of extended complete Eulerian graphs having the 4-cut property, we are able to test core membership in O(n) time, and when the core is not empty, we show how to calculate the nucleolus in O(n^2) time, n being the number of players.},
url = {/files/dp366.pdf},
author = {Granot, D. and Hamers, H. and Kuipers, J. and Maschler, M.}
}
@booklet {zvikaneeman-cao2004,
title = {Corruption and Openness},
journal = {Discussion Papers},
number = {353},
year = {2004},
month = {3},
abstract = {We report an intriguing empirical observation. The relationship between corruption and output depends on the economy{\textquoteright}s degree of openness: in open economies, corruption and GNP per capita are strongly negatively correlated; but in closed economies, there is no relationship at all. This stylized fact is robust to a variety of different empirical specifications. In particular, the same basic pattern persists if we use alternative measures of openness, if we focus on different time periods, if we restrict the sample to include only highly corrupt countries, if we restrict attention to specific geographic areas or to poor countries, and if we allow for the possible endogeneity of both the corruption and openness measures. We find that the extent to which corruption affects output is determined primarily by the degree of financial openness. The difference between closed and open economies is mainly due to the different effect of corruption on capital accumulation. We present a model, consistent with these findings, in which the main channel through which corruption affects output is capital drain.},
url = {/files/dp353.pdf},
author = {Neeman, Zvika and Paserman, M. Daniele and Simhon, Avi}
}
@booklet {kareev-ddqiwtsoissviatloln2004,
title = {Does Decision Quality (Always) Increase with the Size of Information Samples? Some Vicissitudes in Applying the Law of Large Numbers},
journal = {Discussion Papers},
number = {347},
year = {2004},
month = {1},
publisher = {Journal of Experimental Psychology: Learning, Memory and Cognition 32 (2006), 883-903},
abstract = {Adaptive decision-making requires that environmental contingencies between decision options and their relative advantages and disadvantages be assessed accurately and quickly. The research presented in this article addresses the challenging notion that contingencies may be more visible from small than large samples of observations. An algorithmic account for such a ""less-is-more"" effect is offered within a threshold-based decision framework. Accordingly, a choice between a pair of options is only made when the contingency in the sample that describes the relative utility of the two options exceeds a critical threshold. Small samples - due to their instability and the high dispersion of their sampling distribution - facilitate the generation of above-threshold contingencies. Across a broad range of parameter values, the resulting small-sample advantage in terms of hits is stronger than their disadvantage in terms of false alarms. Computer simulations and experimental findings support the predictions derived from the threshold model. In general, the relative advantage of small samples is most apparent when information loss is low, when decision thresholds are high, and when ecological contingencies are weak to moderate.},
url = {/files/dp347.pdf},
author = {Fiedler, Klaus and Kareev, Yaakov}
}
@booklet {cohen-teeosdisaee2004,
title = {The Evolutionary Ecology of Species Diversity in Stressed and Extreme Environments},
journal = {Discussion Papers},
number = {352},
year = {2004},
month = {3},
url = {/files/dp352.pdf},
author = {Cohen, Dan}
}
@booklet {furstenberg-fotctitgroelsig2004,
title = {Findings of the Committee to Investigate the Gans-Inbal Results on Equidistant Letter Sequences in Genesis},
journal = {Discussion Papers},
number = {364},
year = {2004},
month = {6},
abstract = {In 1996, a committee was formed to examine the results that had been reported by H.J. Gans regarding the existence of "encoded" text in the bible foretelling events that took place many years after the Bible was written. The committee performed two additional tests in the spirit of the Gans experiments. Both tests failed to confirm the existence of the putative code.},
url = {/files/dp364.1.pdf},
author = {Aumann, Robert J. and Furstenberg, Hillel}
}
@booklet {larrygoldstein-fbiatdwa2004,
title = {Functional BRK Inequalities, and Their Duals, with Applications},
journal = {Discussion Papers},
number = {374},
year = {2004},
month = {11},
publisher = {Journal of Theoretical Probability 20, 275-293 (2007)},
abstract = {The inequality conjectured by van den Berg and Kesten in [9], and proved by Reimerin [6], states that for A and B events on S, a product of finitely many finite sets, and P any product measure on S, P(A{\textquestiondown}B) P(A)P(B), where A{\textquestiondown}B are the elementary events which lie in both A and B for {\textquoteleft}disjoint reasons.{\textquoteright} This inequality on events is the special case, for indicator functions, of the inequalityhaving the following formulation. Let X be a random vector with n independent components, each in some space Si (such as{\quotedblbase}d), and set S = {\textasciicircum}ni=1Si. Say that the function f : S {\textdagger}{\textquoteright}{\quotedblbase}depends on K {\v S}{\quotesinglbase} 1,...,n if f(x) = f(y) whenever xi = yi for all i {\textasciicircum}{\textasciicircum} K. Then for any given finite or countable collections of non-negative real valued functions f$\pm$$\pm${\textasciicircum}{\textasciicircum}A, g{\texttwosuperior}{\texttwosuperior}{\textasciicircum}{\textasciicircum}B on S which depend on K$\pm$ and L{\texttwosuperior} respectively,EsupK$\pm${\textasciicircum}\copyrightL{\texttwosuperior}={\textasciicircum}{\l}dots} f$\pm$(X) g{\texttwosuperior}(X) Esup f$\pm$(X) Esup g{\texttwosuperior}(X). Related formulations, and functional versions of the dual inequality on events by Kahn,Saks, and Smyth [4], are also considered. Applications include order statistics, assignment problems, and paths in random graphs.},
url = {/files/dp374.pdf},
author = {Goldstein, Larry and Rinott, Yosef}
}
@booklet {neeman-otgofseimd2004,
title = {On the Generic (Im)possibility of Full Surplus Extraction in Mechanism Design},
journal = {Discussion Papers},
number = {350},
year = {2004},
month = {2},
publisher = {Econometrica 74 (2006), 213-233},
abstract = {A number of studies, most notably Cr{\texttimes}mer and McLean (1985, 1988), have shown that in Harsanyi type spaces of a fixed finite size, it is generically possible to design mechanisms that extract all the surplus from players, and as a consequence, implement any outcome as if the players{\textquoteright} private information were commonly known. In contrast, we show that within the set of common priors on the universal type space, the subset of priors that permit the extraction of the players{\textquoteright} full surplus is shy. Shyness is a notion of smallness for convex subsets of infinite-dimensional topological vector spaces (in our case, the set of common priors), which generalizes the usual notion of zero Lebesgue measure in finite-dimensional spaces.},
url = {/files/dp350.pdf},
author = {Heifetz, Aviad and Neeman, Zvika}
}
@booklet {neeman-iimfeg2004,
title = {Inspection in Markets for Experience Goods},
journal = {Discussion Papers},
number = {349},
year = {2004},
month = {2},
abstract = {We consider a simple dynamic "collective reputation" model of a market for an experience good into which we introduce imperfect quality inspections. In each period two markets operate: a prime market for sellers with a good reputation, and a regular market for all other sellers. In every period, the quality of produced goods is inspected, and producers who have been found to produce low quality goods are barred from selling in the prime market in the next period. We demonstrate that the average quality of the good in both markets may decrease as inspection technology improves. A few applications of the model are discussed.},
url = {/files/dp349.pdf},
author = {Moav, Omer and Neeman, Zvika}
}
@booklet {ollehaggstrom-alolnfwm2004,
title = {A Law of Large Numbers for Weighted Majority},
journal = {Discussion Papers},
number = {363},
year = {2004},
month = {6},
abstract = {Consider an election between two candidates in which the voters{\textquoteright} choices are random and independent and the probability of a voter choosing the first candidate is p > 1/2. Condorcet{\textquoteright}s Jury Theorem which he derived from the weak law of large numbers asserts that if the number of voters tends to infinity then the probability that the first candidate will be elected tends to one. The notion of influence of a voter or its voting power is relevant for extensions of the weak law of large numbers for voting rules which are more general than simple majority. In this paper we point out two different ways to extend the classical notions of voting power and influences to arbitrary probability distributions. The extension relevant to us is the "effect" of a voter, which is a weighted version of the correlation between the voter{\textquoteright}s vote and the election{\textquoteright}s outcomes. We prove an extension of the weak law of large numbers to weighted majority games when all individual effects are small and show that this result does not apply to any voting rule which is not based on weighted majority.},
url = {/files/dp363.pdf},
author = {Haggstrom, Olle and Kalai, Gil and Mossel, Elchanan}
}
@booklet {bar-hillel-laaslb2004,
title = {Loss Aversion and Status-Quo Label Bias},
journal = {Discussion Papers},
number = {373},
year = {2004},
month = {11},
publisher = {Social Cognition, 2010, Vol. 28, No. 2, Pp. 192-206.},
abstract = {It has been noted and demonstrated that people are reluctant to make changes in their current state (called the status quo bias, Samuelson \& Zeckhauser, 1988), and to trade objects they own (called the endowment effect, Thaler, 1980). This reluctance has been explained by a combination of loss aversion and reference dependence which causes the status quo to appear better than its alternative, ceteris paribus. In the present study, respondents were asked to rate the attractiveness of various policies, and to list their pros and cons. We find that just labeling some state of affairs status quo enhances its rating (which we call the status quo label bias); namely, a policy seemed more attractive to respondents who thought it is the status quo than to those who did not. An analysis of the listed pros and cons provides evidence that a model of the balance of a policy{\textquoteright}s pros and cons is a good predictor of that policy{\textquoteright}s attractiveness. Rendering the pros and cons in terms of losses and gains provides evidence that losses do, indeed, loom larger than gains. When put together, our results provide an empirical grounding for the loss aversion explanation of the status quo bias.},
url = {/files/SQLB-373.pdf},
author = {Moshinsky, Avital and Bar-Hillel, Maya}
}
@booklet {davidassaf-mevwtssr2004,
title = {Maximizing Expected Value with Two Stage Stopping Rules},
journal = {Discussion Papers},
number = {351},
year = {2004},
month = {3},
publisher = {Random Walks, Sequential Analysis and Related Topics, Chao Agnes Hsiung, Zhiliang Ying and Cun-Hui Zhang, Eds. World Scientific, Singapore (2006), 3-27},
abstract = {Let Xn, {\textbrokenbar},X1 be i.i.d. random variables with distribution function F and finite expectation. A statistician, knowing F, observes the X values sequentially and is given two chances to choose X{\textquoteright}s using stopping rules. The statistician{\textquoteright}s goal is to select a value of X as large as possible. Let Vn2 equal the expectation of the larger of the two values chosen by the statistician when proceeding optimally. We obtain the asymptotic behavior of the sequence Vn2 for a large class of F{\textquoteright}s belonging to the domain of attraction (for the maximum) D(GII{\textquoteright}{\textquoteright}$\pm$), where GII{\textquoteright}{\textquoteright}$\pm$ (x) = exp(-x-{\textquoteright}{\textquoteright}$\pm$){\textquoteright}{\textquoteright}(x > 0) with$\pm$ > 1. The results are compared with those for the asymptotic behavior of the classical one choice value sequence Vn1, as well as with the ""prophet value"" sequence E(maxXn, {\textbrokenbar},X1), and indicate that substantial improvement is obtained when given two chances to stop, rather than one.},
url = {/files/dp351.pdf},
author = {Assaf, David and Goldstein, Larry and Samuel-Cahn, Ester}
}
@booklet {judithavrahami-tmotdgpa2004,
title = {The Mysteries of the Diagonal: Gender-Related Perceptual Asymmetries},
journal = {Discussion Papers},
number = {348},
year = {2004},
month = {2},
publisher = {Perception \& Psychophysics (In Press)},
abstract = {The paper reports a perceptual asymmetry for the two diagonals that is related to gender such that females prefer the diagonal spanning from top-right to bottom left (/) while males the opposite one ({\textquoteright}{\textquoteright}). This relationship is observed in a variety of tasks: Aesthetic judgment of paintings, spotting differences between two paintings, and visual search for a tilted line among similarly tilted distractors. The paper does not provide an explanation of the relationship between this asymmetry and gender but rules out several potential mediating factors, such as eye dominance, head tilt, handedness, and hemispheric differences. At the same time, the paper does outline the scope of the phenomenon: The asymmetry is found both for meaningful and for meaningless stimuli and both at brief and at extended presentation. Moreover, the asymmetry is found related to the tilt of the visual elements that require processing not to their location in the visual field.},
url = {/files/dp348.pdf},
author = {Avrahami, Judith and Argaman, Taly and Weiss-Chasum, Dvora}
}
@booklet {sudholter-otnotmbs2004,
title = {On the Non-Emptiness of the Mas-Colell Bargaining Set},
journal = {Discussion Papers},
number = {360},
year = {2004},
month = {5},
publisher = {Journal of Mathematical Economics 41 (2005), 1060-1068},
abstract = {We introduce an extension of the Mas-Colell bargaining set and construct, by an elaboration on a voting paradox, a superadditive four-person nontransferable utility game whose extended bargaining set is empty. It is shown that this extension constitutes an upper hemicontinuous correspondence. We conclude that the Mas-Colell bargaining set of a non-levelled superadditive NTU game may be empty.},
url = {/files/dp360.pdf},
author = {Peleg, Bezalel and Sudholter, Peter}
}
@booklet {samuel-cahn-otcsoaes2004,
title = {Optimal Two Choice Stopping on an Exponential Sequence},
journal = {Discussion Papers},
number = {375},
year = {2004},
month = {11},
publisher = {Sequential Analysis 25 (2006), 351-363},
abstract = {Asymptotic results for the problem of optimal two choice stopping on an n element long i.i.d. sequence Xn, . . . ,X1 have previously been obtained for two of the three domains of attraction. An asymptotic result is proved for the exponential distribution, a representative from the remaining, Type I domain, and it is conjectured that the same behavior obtains for all Type I distributions.},
url = {/files/dp375.pdf},
author = {Goldstein, Larry and Samuel-Cahn, Ester}
}
@booklet {oliviergossner-ouocr2004,
title = {Optimal Use of Communication Resources},
journal = {Discussion Papers},
number = {377},
year = {2004},
month = {12},
abstract = {We study a repeated game with asymmetric information about a dynamic state of nature. In the course of the game, the better informed player can communicate some or all of his information with the other. Our model covers costly and/or bounded communication. We characterize the set of equilibrium payoffs, and contrast these with the communication equilibrium payoffs, which by definition entail no communication costs.},
url = {/files/dp377.pdf},
author = {Gossner, Olivier and Hernandez, Penelope and Neyman, Abraham}
}
@booklet {yaari-psar2004,
title = {Parity, Sympathy, and Reciprocity},
journal = {Discussion Papers},
number = {354},
year = {2004},
month = {3},
url = {/files/dp354.pdf},
author = {G{\"u}th, Werner and Yaari, Menahem E.}
}
@booklet {harel-pmatlabctle2004,
title = {Probability Matching and the Law: A Behavioral Challenge to Law \& Economics},
journal = {Discussion Papers},
number = {368},
year = {2004},
month = {7},
abstract = {Contrary to the conventional assumption that individuals maximize payoffs, robust experimental studies show that individuals who face repeated choices involving probabilistic costs and benefits often make sub-optimal decisions by applying the strategy of "probability matching." The following study, by integrating this literature with the traditional models of law and economics, and through experimental illustration, presents the possible effects of probability matching in the legal context. The paper also explores how probability matching can guide policy making.},
url = {/files/dp368.pdf},
author = {Guttel, Ehud and Harel, Alon}
}
@booklet {mayabar-hillel-sakmctacsii2004,
title = {Scoring and Keying Multiple Choice Tests: A Case Study in Irrationality},
journal = {Discussion Papers},
number = {370},
year = {2004},
month = {9},
publisher = {Mind and Society 4 (2005), 3-12},
abstract = {We offer a case-study in irrationality, showing that even in a high stakes, deliberate context, highly intelligent professionals may adopt dominated practices. Multiple-choice tests (MCTs) enjoy many advantages that made them popular tools in educational and psychological measurement. But they suffer from the so-called guessing problem: test-makers cannot distinguish lucky guesses from answers based on knowledge. One way professional test-makers have dealt with this problem is by attempting to lower the incentive to guess, through penalizing errors (called formula scoring). Another is to rid tests of various cues (e.g., a preponderance of correct answers in middle positions) that might help testwise test-takers guess at better than chance odds. Key balancing is the strategy test-takers adopted for ridding tests of positional biases. We show that formula scoring and key balancing, though widespread and intuitively appealing, are in fact "irrational" practices. They do not dispose of the guessing problem and are fraught with problems of their own. Yet they persist, even in the presence of more rational alternatives: Number right scoring is superior to formula scoring, and key randomization is superior to key balancing.},
url = {/files/dp370.pdf},
author = {Bar-Hillel, Maya and Budescu, David and Attali, Yigal}
}
@booklet {kalai-si2004,
title = {Social Indeterminacy},
journal = {Discussion Papers},
number = {362},
year = {2004},
month = {6},
publisher = {Econometrica 72 (2004), 1565-1581},
abstract = {An extension of Condorcet{\textquoteright}s paradox by McGarvey (1953) asserts that for every asymmetric relation R on a finite set of candidates there is a strict-preferences voter profile that has the relation R as its strict simple majority relation. We prove that McGarvey{\textquoteright}s theorem can be extended to arbitrary neutral monotone social welfare functions which can be described by a strong simple game G if the voting power of each individual, measured by the Shapley-Shubik power index, is sufficiently small. Our proof is based on an extension to another classic result concerning the majority rule. Condorcet studied an election between two candidates in which the voters{\textquoteright} choices are random and independent and the probability of a voter choosing the first candidate is p > 1/2. Condorcet{\textquoteright}s Jury Theorem asserts that if the number of voters tends to infinity then the probability that the first candidate will be elected tends to one. We prove that this assertion extends to a sequence of arbitrary monotone strong simple games if and only if the maximum voting power for all individuals tends to zero.},
url = {/files/dp362.pdf},
author = {Gil Kalai}
}
@booklet {mas-colell-sudane2004,
title = {Stochastic Uncoupled Dynamics and Nash Equilibrium},
journal = {Discussion Papers},
number = {371},
year = {2004},
month = {9},
publisher = {Games and Economic Behavior 57 (2006), 286-303},
abstract = {In this paper we consider dynamic processes, in repeated games, that are subject to the natural informational restriction of uncoupledness. We study the almost sure convergence to Nash equilibria, and present a number of possibility and impossibility results. Basically, we show that if in addition to random moves some recall is introduced, then successful search procedures that are uncoupled can be devised. In particular, to get almost sure convergence to pure Nash equilibria when these exist, it suffices to recall the last two periods of play.},
url = {/files/uncoupl-st.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {toxvaerd-smwatomc2004,
title = {Strategic Merger Waves: A Theory of Musical Chairs},
journal = {Discussion Papers},
number = {359},
year = {2004},
month = {5},
abstract = {This paper proposes an explanation of merger waves based on the interaction between competitive pressure and irreversibility of mergers in an uncertain environment. A set of acquirers compete over time for scarce targets. At each point in time, an acquirer can either postpone a takeover attempt, or raid immediately. By postponing the takeover attempt, an acquirer may gain from more favorable future market conditions, but runs the risk of being preempted by rivals. First, a complete information model is considered, and it is shown that the above tradeoff leads to a continuum of subgame perfect equilibria in monotone strategies that are strictly Pareto ranked. All these equilibria share the feature that all acquirers rush simultaneously in merger waves. The model is then extended to a dynamic global game by introducing slightly noisy private information about merger profitability. This game is shown to have a unique Markov perfect Bayesian equilibrium in monotone strategies, and the timing of the merger wave can thus be predicted. Last, the comparative dynamics predictions of the model are related to stylized facts.},
url = {/files/dp359.pdf},
author = {Flavio Toxvaerd}
}
@booklet {toxvaerd-atood2004,
title = {A Theory of Optimal Deadlines},
journal = {Discussion Papers},
number = {357},
year = {2004},
month = {5},
abstract = {This paper sets forth a model of contracting for delivery in an environment with time to build and adverse selection. The optimal contract is derived and characterized and it takes the form of a deadline contract. Such a contract stipulates a deadline for delivery for each possible type of agent efficiency. The optimal contract induces inefficient delay by using delivery time as a screening device. Furthermore, rents are decreasing in the agent{\textquoteright}s efficiency. In meeting the deadline, the agent{\textquoteright}s effort is strictly increasing over time, due to discounting. It is shown that increasing the project{\textquoteright}s gross value decreases delivery time, while the scale or difficulty of the project decreases it. Last, it is shown that the agent{\textquoteright}s rents are increasing in both project difficulty and gross project value.},
url = {/files/dp357.pdf},
author = {Flavio Toxvaerd}
}
@booklet {toxvaerd-tote2004,
title = {Time of the Essence},
journal = {Discussion Papers},
number = {358},
year = {2004},
month = {5},
abstract = {In most industries, ranging from information systems development to construction, an overwhelming proportion of projects are delayed beyond estimated completion time. This fact constitutes somewhat of a puzzle for existing theory. The present paper studies project delays and optimal contracts under moral hazard in a setting with time to build. Within this setup, project delays are found to be most likely to happen at early stages of development, and intimately connected to the degree of commitment of the procurer and the class of contracts that can be enforced. The first-best, optimal spot contracting and optimal long-term contract scenarios are analyzed, as well as commonly encountered additional constraints on the long-term contract.},
url = {/files/dp358.pdf},
author = {Flavio Toxvaerd}
}
@booklet {zultan-ufciubve2004,
title = {Unilateral Face-to-Face Communication in Ultimatum Bargaining {\textendash} A Video Experiment},
journal = {Discussion Papers},
number = {369},
year = {2004},
month = {9},
abstract = {It is commonly accepted that face-to-face communication induces cooperation. The experiment disentangles communication and social effect (replication of Roth, 1995) and examines the components of the social effect with the help of unilateral communication. Results suggest that separate processes, both of a strategic and of an affective-social nature may induce cooperative outcomes in ultimatum bargaining with pre-play communication, depending on the communication protocol. Unilateral communication is found to have weaker effects than bilateral communication, and affects especially the recipient of the communication.},
url = {/files/dp369.pdf},
author = {Carsten Schmidt and Ro{\textquoteright}i Zultan}
}
@booklet {fiedler-otaoctsrdvtis2003,
title = {On the Accentuation of Contingencies: The Sensitive Research Designer Versus the Intuitive Statistician},
journal = {Discussion Papers},
number = {346},
year = {2003},
month = {12},
publisher = {Published As: "Nonproportional Sampling and the Amplification of Correlations", Psychological Science 17 (2006), 715-720},
abstract = {The information used in reaching a decision between alternatives is often gleaned through samples drawn from the distributions of their outcomes. Since in most cases it is the direction of the difference in value, rather than its magnitude, that is of primary interest, the decision maker may benefit from sampling data in a way that will accentuate, rather than accurately estimate, the magnitude of that difference, as it helps to reach a decision swiftly and confidently. A reanalysis of performance in a study by Fiedler, Brinkmann, Betsch, and Wild (Journal of Experimental Psychology: General, 2000, 129, 399-418), in which participants had the freedom to sample data any way they wished, demonstrates that their apparently poor performance as estimators of conditional probability may actually reflect sophisticated sampling, which resulted in accentuating the sample value of the degree of contingency in the data. Thus, participants might be characterized as "sensitive research designers", intent on increasing the chances of detecting an effect (if one existed).},
url = {/files/dp346.pdf},
author = {Yaakov Kareev and Klaus Fiedler}
}
@booklet {shapira-aasi2003,
title = {Aspiration and Survival in Jeopardy! },
journal = {Discussion Papers},
number = {331},
year = {2003},
month = {9},
publisher = {(revised in dp $\#$417)},
abstract = {Behavior in dynamic competitive situations requires decision makers to evaluate their own as well as their competitors{\textquoteright} positions. This paper uses data from a realistic competitive risk taking setting, Jeopardy{\textquoteright}s Tournament of Champions, to test whether individual players choose the strategic best response in making their betting decisions. The analyses show that the percentage of players choosing the strategic best response is very low, a rather surprising finding because the Tournament of Champions is contested by the very best and most experienced players of the Jeopardy game. We conjecture that performance aspiration and survival targets guide risk-taking behavior in competitive situations. Furthermore, in situations where decisions are made under pressure, contestants tend to focus on one target while ignoring alternative targets and the choices that are available to their competitors. This may lead them to select inferior competitive strategies.},
url = {/files/dp331.pdf},
author = {Elizabeth Boyle and Zur Shapira}
}
@booklet {smorodinsky-avovmg2003,
title = {Asymptotic Values of Vector Measure Games},
journal = {Discussion Papers},
number = {344},
year = {2003},
month = {11},
publisher = {Mathematics of Operations Research (forthcoming)},
abstract = {The asymptotic value, introduced by Kannai in 1966, is an asymptotic approach to the notion of the Shapley value for games with infinitely many players. A vector measure game is a game v where the worth v(S) of a coalition S is a function f of $\mu$(S) where $\mu$ is a vector measure. Special classes of vector measure games are the weighted majority games and the two-house weighted majority games where a two-house weighted majority game is a game in which a coalition is winning if and only if it is winning in two given weighted majority games. All weighted majority games have an asymptotic value. However, not all two-house weighted majority games have an asymptotic value. In this paper we prove that the existence of infinitely many atoms with sufficient variety suffice for the existence of the asymptotic value in a general class of nonsmooth vector measure games that includes in particular two-house weighted majority games.},
url = {/files/Neyman344.pdf},
author = {Abraham Neyman and Rann Smorodinsky}
}
@booklet {sheshinski-oaeact2003,
title = {On Atmosphere Externality and Corrective Taxes},
journal = {Discussion Papers},
number = {328},
year = {2003},
month = {7},
abstract = {It has been argued that in the presence of an Atmosphere Externality and competitive behavior by households, a uniform commodity tax on the externality - generating good attains the first best. It is demonstrated, however, that if income redistribution is desirable then personalized taxes are required for a second-best optimum. Each of these taxes is the sum of a uniform (across households) tax and a component, positive or negative, which depends on the household{\textquoteright}s income and demand elasticities. Second-best optimal indirect taxes and rules for investment in externality - reducing measures are also considered.},
url = {/files/dp328.pdf},
author = {Eytan Sheshinski}
}
@booklet {hart-aaotcnuv2003,
title = {An Axiomatization of the Consistent Non-Transferable Utility Value},
journal = {Discussion Papers},
number = {337},
year = {2003},
month = {10},
publisher = {International Journal of Game Theory 33 (2005), 355-366},
abstract = {The Maschler-Owen consistent value for non-transferable utility games is axiomatized, by means of a marginality axiom.},
author = {Sergiu Hart}
}
@booklet {barryoneill-bwaa2003,
title = {Bargaining with an Agenda},
journal = {Discussion Papers},
number = {315},
year = {2003},
month = {5},
publisher = {Games and Economic Behavior 48 (2004), 139-153},
abstract = {Gradual bargaining is represented by an agenda: a family of increasing sets of joint utilities, parameterized by time. A solution for gradual bargaining specifies an agreement at each time. We axiomatize an ordinal solution, i.e., one that is covariant with order-preserving transformations of utility. It can be viewed as the limit of a step-by-step bargaining in which the agreement of the last negotiation becomes the disagreement point for the next. The stepwise agreements may follow the Nash solution, the Kalai-Smorodinsky solution or many others.},
url = {/files/dp315.pdf},
author = {Barry O{\textquoteright}Neill and Dov Samet and Zvi Wiener and Eyal Winter}
}
@booklet {sheshinski-brasolociasm2003,
title = {Bounded Rationality and Socially Optimal Limits on Choice in a Self-Selection Model},
journal = {Discussion Papers},
number = {330},
year = {2003},
month = {7},
abstract = {When individuals choose from whatever alternatives available to them the one that maximizes their utility then it is always desirable that the government provide them with as many alternatives as possible. Individuals, however, do not always choose what is best for them and their mistakes may be exacerbated by the availability of options. We analyze self-selection models, when individuals know more about themselves than it is possible for governments to know, and show that it may be socially optimal to limit and sometimes to eliminate individual choice. As an example, we apply Luce{\textquoteright}s (1959) model of random choice to a work-retirement decision model and show that the optimal provision of choice is positively related to the degree of heterogeneity in the population and that even with very small degrees of non-rationality it may be optimal not to provide individuals any choice.},
url = {/files/dp330.pdf},
author = {Eytan Sheshinski}
}
@booklet {warglien-coateorateossifaatr2003,
title = {Cognitive Overload and the Evaluation of Risky Alternatives: The Effects of Sample Size, Information Format and Attitude To Risk},
journal = {Discussion Papers},
number = {340},
year = {2003},
month = {10},
abstract = {When the amount of information to be dealt with exceeds people{\textquoteright}s short-term memory capacity, they must resort to the sampling of information. In the present study we show that, under conditions of cognitive overload (which could result from decision-making under stress, time constraints or information abundance), individuals exhibit systematic differences in estimating variance. Moreover, these differences critically depend on the format of the evidence presented: Variance is downward attenuated when information is presented analogically, but amplified when it is presented numerically. These distortions in the perception of variance affect individuals{\textquoteright} pricing of risky alternatives. We suggest that these results may help to explain economic anomalies, such as excess trading in financial markets. We also point out possibilities for manipulating the perception of variability and normative implications concerning the presentation of information on the variance of phenomena.},
url = {/files/Kareev340.pdf},
author = {Yaakov Kareev and Massimo Warglien}
}
@booklet {hart-aconuv2003,
title = {A Comparison of Non-Transferable Utility Values},
journal = {Discussion Papers},
number = {338},
year = {2003},
month = {10},
publisher = {Theory and Decision 56 (2004), 35-46},
abstract = {Three values for non-transferable utility games {\textendash} the Harsanyi NTU-value, the Shapley NTU-value, and the Maschler-Owen consistent NTU-value {\textendash} are compared in a simple example.},
url = {/files/3ntu-val.html},
author = {Sergiu Hart}
}
@booklet {pradeepdubey-cvatbpi2003,
title = {Compound Voting and the Banzhaf Power Index},
journal = {Discussion Papers},
number = {333},
year = {2003},
month = {9},
abstract = {We present three axioms for a power index defined on the domain of simple (voting) games. Positivity requires that no voter has negative power, and at least one has positive power. Transfer requires that, when winning coalitions are enhanced in a game, the change in voting power depends only on the change in the game, i.e., on the set of new winning coalitions. The most crucial axiom is composition: the value of a player in a compound voting game is the product of his power in the relevant first-tier game and the power of his delegate in the second-tier game. We prove that these three axioms categorically determine the Banzhaf index.},
url = {/files/Haimanko333.pdf},
author = {Pradeep Dubey, Ezra Einy and Ori Haimanko}
}
@booklet {peters-cvswacov2003,
title = {Consistent Voting Systems with a Continuum of Voters},
journal = {Discussion Papers},
number = {308},
year = {2003},
month = {1},
publisher = {Social Choice and Welfare 27 (2006), 477-492},
abstract = {Voting problems with a continuum of voters and finitely many alternatives are considered. The classical Arrow and Gibbard-Satterthwaite theorems are shown to persist in this model, not for single voters but for coalitions of positive size. The emphasis of the study is on strategic considerations, relaxing the nonmanipulability requirement: are there social choice functions such that for every profile of preferences there exists a strong Nash equilibrium resulting in the alternative assigned by the social choice function? Such social choice functions are called exactly and strongly consistent. The study offers an extension of the work of Peleg (1978a) and others. Specifically, a class of anonymous social choice functions with the required property is characterized through blocking coefficients of alternatives, and associated effectivity functions are studied. Finally, representation of effectivity functions by game forms having a strong Nash Equilibrium is studied.},
url = {/files/dp308.pdf},
author = {Bezalel Peleg and Hans Peters}
}
@booklet {bezalelpeleg-cioscc2003,
title = {Constitutional Implementation of Social Choice Correspondences},
journal = {Discussion Papers},
number = {323},
year = {2003},
month = {7},
publisher = {International Journal of Game Theory 33 (2005), 381-396},
abstract = {A game form constitutionally implements a social choice correspondence if it implements it in Nash equilibrium and, moreover, the associated effectivity functions coincide. This paper presents necessary and sufficient conditions for a unanimous social choice correspondence to be constitutionally implementable, and sufficient and almost necessary conditions for an arbitrary (but surjective) social choice correspondence to be constitutionally implementable. It is shown that the results apply to interesting classes of scoring and veto rules.},
url = {/files/dp323.pdf},
author = {Bezalel Peleg and Ton Storcken}
}
@booklet {peleg-otcoroef2003,
title = {On the Continuity of Representations of Effectivity Functions},
journal = {Discussion Papers},
number = {324},
year = {2003},
month = {7},
publisher = {Journal of Mathematical Economics 42 (2006), 827-842},
abstract = {An effectivity function assigns to each coalition of individuals in a society a family of subsets of alternatives such that the coalition can force the outcome of society{\textquoteright}s choice to be a member of each of the subsets separately. A representation of an effectivity function is a game form with the same power structure as that specified by the effectivity function. In the present paper we investigate the continuity properties of the outcome functions of such representation. It is shown that while it is not in general possible to find continuous representations, there are important subfamilies of effectivity functions for which continuous representations exist. Moreover, it is found that in the study of continuous representations one may practically restrict attention to effectivity functions on the Cantor set. Here it is found that general effectivity functions have representations with lower or upper semicontinuous outcome function.},
url = {/files/dp324.pdf},
author = {Hans Keiding and Bezalel Peleg}
}
@booklet {jean-marctallon-cbac2003,
title = {Contradicting Beliefs and Communication},
journal = {Discussion Papers},
number = {311},
year = {2003},
month = {4},
abstract = {We address the issue of the representation as well as the evolution of (possibly) mistaken beliefs. We develop a formal setup (a mutual belief space) in which agents might have a mistaken view of what the model is. We then model a communication process, by which agents communicate their beliefs to one another. We define a revision rule that can be applied even when agents have contradictory beliefs. We study its properties and, in particular, show that, when mistaken, agents do not necessarily eventually agree after communicating their beliefs. We finally address the dynamics of revision and show that when beliefs are mistaken, the order of communication may affect the resulting belief structure.},
url = {/files/dp311.pdf},
author = {Jean-Marc Tallon and Jean-Christophe Vergnaud and Shmuel Zamir}
}
@booklet {weiss-dagfanpe2003,
title = {Decay and Growth for a Nonlinear Parabolic Equation},
journal = {Discussion Papers},
number = {342},
year = {2003},
month = {10},
publisher = {Proceedings of the American Mathematical Society 133 (2005), 2613-2620},
abstract = {We prove a difference equation analogue for the decay-of-mass result for the nonlinear parabolic equation $u_t = \Delta u + \mu |\nabla u|^q$ when $\mu < 0$.},
url = {/files/decay.html},
author = {Sergiu Hart and Benjamin Weiss}
}
@booklet {schweinzer-dacvpiarg2003,
title = {Dissolving a Common Value Partnership in a Repeated {\textquoteright}queto{\textquoteright} Game},
journal = {Discussion Papers},
number = {318},
year = {2003},
month = {5},
abstract = {We analyse a common value, alternating ascending bid, first price auction as a repeated game of incomplete information where the bidders hold equal property rights to the object auctioned off. Consequently they can accept (by quitting) or veto any proposed settlement. We characterise the essentially unique, sequentially rational dynamic Bayesian Nash equilibrium of this game under incomplete information on one side and discuss its properties.},
url = {/files/dp318.pdf},
author = {Paul Schweinzer}
}
@booklet {bar-hillel-epdk2003,
title = {E Psychologist Daniel Kahneman},
journal = {Discussion Papers},
number = {334},
year = {2003},
month = {9},
publisher = {The Economic Quarterly 4 (2003), 771-780},
abstract = {A Nobel Prize in Economics was given to the psychologist Daniel Kahneman for his joint research with the late psychologist Amos Tversky on decision making under uncertainty and on subjective judgments of uncertainty. The two proposed Prospect Theory as a descriptive alternative to Utility Theory, the reigning normative theory of choice under uncertainty. Kahneman and Tversky argued that human psychology prevents people from being rational in the sense required by Utility Theory {\textendash} consistency {\textendash} for two main reasons. First, people are more sensitive to changes in position (economic or otherwise) than to final positions, a fact ignored by Utility Theory. Thus, they value a 50\% discount on a 100 NIS item more than a 5\% discount on a 1000 NIS item. Moreover, they are more sensitive to changes for the worse than to changes for the better. Second, we are sensitive not just to outcomes, but to outcomes-under-a-description, which makes us inconsistent from a consequentialist viewpoint (e.g., we don{\textquoteright}t feel the same about losing 100 NIS on our way to the theater boxoffice, vs. losing a 100 NIS ticket on our way to the theater). The article describes some of the empirical observations that led to the development of Prospect Theory, and some of its basic tenets.},
url = {/files/dp334.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {bergman-eopweaaatrib2003,
title = {Ecologies of Preferences with Envy as an Antidote to Risk-Aversion in Bargaining},
journal = {Discussion Papers},
number = {322},
year = {2003},
month = {6},
abstract = {Models have been put forward recently that seem to be successful in explaining apparently anomalous experimental results in the Ultimatum Game, where responders reject positive offers. While imparting fixed preference orders to fully rational agents, these models depart from traditional models by assuming preferences that take account not only of the material payoff to oneself, but also of that which is given to others. However, they leave open the question of how an agent{\textquoteright}s economic survival is helped by a preference order that advises him to leave money on the table. Our answer is that, indeed, doing so does not help. But that the same envious preference order that ill advises in some circumstances to reject an "insultingly" small offer, advises well in other circumstances, when it helps the same agent to overcome his risk- aversion and to offer a risky, tough offer that yields him a higher expected dollar gain. We show the existence of population distributions where the two effects exactly balance out across different preference types. These distributions are stable, stationary, and inefficient, in which different preferences asymptotically are represented, and where, as commonly observed in an Ultimatum Game, positive offers are made, of which some are rejected with positive probability. Our theory yields new testable hypotheses.},
url = {/files/dp322.pdf},
author = {Nitai Bergman and Yaacov Bergman}
}
@booklet {winter-emfmpg2003,
title = {Efficient Mechanisms for Multiple Public Goods},
journal = {Discussion Papers},
number = {314},
year = {2003},
month = {5},
publisher = {Journal of Public Economics 88 (2004), 629-644},
abstract = {We propose two sequential mechanisms for efficient production of public goods. Our analysis differs from the existing literature in allowing for the presence of multiple public goods and in also being simple. While both mechanisms ensure efficiency, the payoffs in the first mechanism are asymmetric, being sensitive to the order in which agents move. The second mechanism corrects for this through a two-stage game where the order of moves in the second stage are randomly determined. The payoffs from the second mechanism correspond to the Shapley value of a well-defined game which summarizes the production opportunities available to coalitions in the economy.},
url = {/files/dp314.pdf},
author = {Suresh Mutuswami and Eyal Winter}
}
@booklet {motro-eisacwtdss2003,
title = {ESS in Symmetric Animal Conflicts with Time Dependent Strategy Sets},
journal = {Discussion Papers},
number = {339},
year = {2003},
month = {10},
abstract = {Animal conflicts are often characterized by time dependent strategy sets. This paper considers the following type of animal conflicts: a member of a group is at risk and needs the assistance of another member to be saved. As long as assistance is not provided, the individual which is at risk has a positive, time dependent rate of dying. Each of the other group members is a potential helper. Assisting this individual accrues a cost, but losing him decreases the inclusive fitness of each group member. A potential helper{\textquoteright}s interval between the moment an individual finds itself at risk and the moment it assists is a random variable, hence its strategy is to choose the probability distribution for this random variable. Assuming that each of the potential helpers knows the others{\textquoteright} strategies, we show that the ability to observe their realizations influences the Evolutionarily Stable Strategies (ESS) of the game. According to our results, where the realizations can be observed ESS always exist: immediate assistance, no assistance and delayed assistance. Where the realizations cannot be observed ESS do not always exist, immediate assistance and no assistance are possible ESS, while delayed assistance cannot be an ESS. We apply our model to the n brothers problem and to the parental investment conflict.},
url = {/files/dp339.pdf},
author = {Osnat Yaniv and Uzi Motro}
}
@booklet {gorodeisky-esflp2003,
title = {Evolutionary Stability for Large Populations},
journal = {Discussion Papers},
number = {312},
year = {2003},
month = {4},
publisher = {Mathematics of Operation Research 31 (2006), 369-380},
abstract = {It has been shown (Hart [2]) that the backward induction (or subgame-perfect) equilibrium of a perfect information game is the unique stable outcome for dynamic models consisting of selection and mutation, when the mutation rate is low and the populations are large, under the assumption that the expected number of mutations per generation is bounded away from zero. Here it is shown that one can dispense with this last condition. In particular, it follows that the backward induction equilibrium is evolutionarily stable for large populations.},
url = {/files/dp312.pdf},
author = {Ziv Gorodeisky}
}
@booklet {noambar-shai-fdibumofc2003,
title = {Flight Durations in Bumblebees Under Manipulation of Feeding Choices},
journal = {Discussion Papers},
number = {325},
year = {2003},
month = {7},
publisher = {Journal of Insect Behavior 17 (2004), 155-168},
abstract = {Foraging bees spend less time flying between flowers of the same species than when flying between individuals of different species. This time saving has been suggested as a possible advantage of flower-constant foraging. We hypothesized that the time required to switch flower type increases if (a) such switches are infrequent and (b) the bees need to decide whether to switch or not. Laboratory reared bumblebees were taught to forage on artificial feeders that were identical in morphology and reward schedule, but were marked by either a blue or a yellow landing surface. In the first two experiments bees foraged alternatively between two feeders. The landing surface was manipulated to coerce the bees to perform either a color-constant or a color-shift flight movement. In Experiment 1 we switched the landing surfaces every 2-3 visits, while in Experiment 2 the bees performed 6-7 color-constant flights before having to perform a color-shift flight. In the third experiment, the bees were presented with binary choices and had to decide to make a color-constant or a color-shift flight. When feeder colors were changed frequently (Experiment 1), we detected no difference between color-constant and color-shift inter-visit times. When bees were repeatedly exposed to one color (Experiment 2), color shifts required a significantly longer time than color-constant flights. When allowed to choose (Experiment 3), bees performed more color-constant flights than color-shift flights. Inter-visit times were similar for color-constant and color-shift flights in this experiment. Overall flight times were slightly but non-significantly longer than in experiments 1 and 2. The results suggest that bees indeed save flight time though flower-constant foraging. However, this time saving is small (~ 1 s / flower visit), and appears only when switches between flower types are infrequent. Additional selective advantages likely favor flower-constant foraging.},
url = {/files/dp325.pdf},
author = {Noam Bar-Shai and Rana Samuels and Tamar Keasar and Uzi Motro and Avi Shmida}
}
@booklet {winter-iad2003,
title = {Incentives and Discrimination},
journal = {Discussion Papers},
number = {313},
year = {2003},
month = {5},
publisher = {American Economic Review 94 (2003), 764-773},
abstract = {Optimal incentive mechanisms may require that agents are rewarded differentially even when they are completely identical and are induced to act the same. We demonstrate this point by means of a simple incentive model where agents{\textquoteright} decisions about effort exertion is mapped into a probability that the project will succeed. We give necessary and sufficient conditions for optimal incentive mechanisms to be discriminatory. We also show that full discrimination across all agents is required if and only if the technology has increasing return to scale. In the non-symmetric framework we show that negligible differences in agents{\textquoteright} attributes may result in major differences in rewards in the unique optimal mechanism.},
url = {/files/dp313.pdf},
author = {Winter, Eyal}
}
@booklet {salant-ltdosc2003,
title = {Learning the Decisions of Small Committees},
journal = {Discussion Papers},
number = {332},
year = {2003},
month = {9},
abstract = {A committee is a collection of members, where every member has a linear ordering on the alternatives of a finite ground set X. The committee chooses between pairs of alternatives drawn from X by a simple majority vote. The committee{\textquoteright}s choices induce a preference relation on X. In this paper, we study the possibility of learning preference relations of small committees from examples. We prove that it is impossible to precisely learn the preference relation of a committee before seeing all its choices, even if a teacher guides the learner through the learning process. We also prove that a learner can approximately learn the preference relation of a committee from a relatively few random examples.},
url = {/files/Salant332.pdf},
author = {Yuval Salant}
}
@booklet {thierryfoucault-lobaamfl2003,
title = {Limit Order Book as a Market for Liquidity},
journal = {Discussion Papers},
number = {321},
year = {2003},
month = {6},
abstract = {We develop a dynamic model of an order-driven market populated by discretionary liquidity traders. These traders differ by their impatience and seek to minimize their trading costs by optimally choosing between market and limit orders. We characterize the equilibrium order placement strategies and the waiting times for limit orders. In equilibrium less patient traders are likely to demand liquidity, more patient traders are more likely to provide it. We find that the resiliency of the limit order book increases with the proportion of patient traders and decreases with the order arrival rate. Furthermore, the spread is negatively related to the proportion of patient traders and the order arrival rate. We show that these findings yield testable predictions on the relation between the trading intensity and the spread. Moreover, the model generates predictions for time-series and cross-sectional variation in the optimal order-submission strategies. Finally, we find that imposing a minimum price variation improves the resiliency of a limit order market. For this reason, reducing the minimum price variation does not necessarily reduce the average spread in limit order markets.},
url = {/files/db321.pdf},
author = {Thierry Foucault and Ohad Kadan and Eugene Kandel}
}
@booklet {salant-lcrfr2003,
title = {Limited Computational Resources Favor Rationality},
journal = {Discussion Papers},
number = {320},
year = {2003},
month = {6},
abstract = {A choice function is a rule that chooses a single alternative from every set of alternatives drawn from a finite ground set. A rationalizable choice function satisfies the consistency condition; i.e., if an alternative is chosen from a set A, then the same alternative is also chosen from every subset of A that contains it. In this paper we study computational aspects of choice, through choice functions. We explore two simple models that demonstrate two important aspects of choice procedures: the ability to remember the past and the capability to perform complex computations. We show that a choice function is optimal in terms of the amount of memory and the computational power required for its computation if and only if the function is rationalizable. We also show that the computation of most other choice functions, including some natural ones, requires much more memory and computational power.},
url = {/files/db320.pdf},
author = {Yuval Salant}
}
@booklet {tamarkugler-mvnaei2003,
title = {Markets Versus Negotiations: An Experimental Investigation},
journal = {Discussion Papers},
number = {319},
year = {2003},
month = {6},
publisher = {Economic Behavior 56 (2006), 121-134},
abstract = {We consider the consequences of competition between two types of experimental exchange mechanisms, a decentralized bargaining market, and a centralized market. The experiment demonstrates that decentralized bargaining is subject to a process of unraveling in which relatively weak traders (buyers with high willingness to pay and sellers with low costs) continuously find trading in the centralized market more attractive until almost no opportunities for mutually beneficial trade remain outside the centralized marketplace.},
url = {/files/db319.pdf},
author = {Tamar Kugler and Zvika Neeman and Nir Vulkan}
}
@booklet {goldberg-otmorgwimace2003,
title = {On the Minmax of Repeated Games with Imperfect Monitoring: A Computational Example},
journal = {Discussion Papers},
number = {345},
year = {2003},
month = {12},
abstract = {The minmax in repeated games with imperfect monitoring can differ from the minmax of those games with perfect monitoring. This can happen when two or more players are able to gain common information known only to themselves, and utilize this information at a later stage. Gossner and Tomala [1] showed that in a class of such games, the minmax is given by a weighted average of the payoffs of two main strategies: one in which the information is gained, and the other in which the information is utilized. While this result is implicit, all examples analyzed to date require a single main strategy in which information is created and utilized simultaneously. We show that two strategies are indeed needed by providing and solving a concrete example of a three-player game.},
url = {/files/Yair345.pdf},
author = {Goldberg, Yair}
}
@booklet {sheshinski-notopoa2003,
title = {Note on the Optimum Pricing of Annuities},
journal = {Discussion Papers},
number = {326},
year = {2003},
month = {7},
abstract = {In a perfectly competitive market for annuities with full information, the price of annuities is equal to individuals{\textquoteright} (discounted) survival probabilities. That is, prices are actuarially fair. In contrast, the pricing implicit in social security systems invariably allows for cross subsidization between different risk groups (males/females). We examine the utilitarian approach to the optimum pricing of annuities and show how the solution depends on the joint distribution of survival probabilities and incomes in the population.},
url = {/files/db326.pdf},
author = {Eytan Sheshinski}
}
@booklet {garybornstein-otmwtononlaesoaapg2003,
title = {One Team Must Win, the Other Need Only Not Lose: An Experimental Study of an Asymmetric Participation Game},
journal = {Discussion Papers},
number = {317},
year = {2003},
month = {5},
publisher = {Journal of Behavioral Decision Making 18 (2005), 111-123},
abstract = {We studied asymmetric competition between two (three-person) groups. Each group member received an initial endowment and had to decide whether or not to contribute it. The group with more~ contributions won the competition and each of its members received a reward. The members of the losing group received nothing. The asymmetry was created by randomly and publicly selecting one group beforehand to be the winning group in the case of a tie. A theoretical analysis of this~ asymmetric game generates two qualitatively different solutions, one in which members of the group that wins in the case of a tie are somewhat more likely to contribute than members of the group that loses, and another in which members of the group that loses in the case of a tie are much more likely to contribute than members of the group that wins. The experimental results are clearly in line with the first solution.},
url = {/files/dp317.pdf},
author = {Gary Bornstein and Tamar Kugler and Shmuel Zamir}
}
@booklet {neyman-occbbrp2003,
title = {Online Concealed Correlation by Boundedly Rational Players},
journal = {Discussion Papers},
number = {336},
year = {2003},
month = {10},
abstract = {In a repeated game with perfect monitoring, correlation among a group of players may evolve in the common course of play (online correlation). Such a correlation may be concealed from a boundedly rational player. The feasibility of such "online concealed correlation" is quantified by the individually rational payoff of the boundedly rational player.We show that "strong" players, i.e., players whose strategic complexity is less stringently bounded, can orchestrate online correlation of the actions of "weak" players, in a manner that is concealed from an opponent of "intermediate" strength. The result is illustrated in two models, each captures another aspect of bounded rationality. In the first, players use bounded recall strategies. In the second, players use strategies that are implementable by finite automata.},
url = {/files/Neyman336.pdf},
author = {Gilad Bavly and Abraham Neyman}
}
@booklet {oliviergossner-omp2003,
title = {Online Matching Pennies},
journal = {Discussion Papers},
number = {316},
year = {2003},
month = {5},
abstract = {We study a repeated game in which one player, the prophet, acquires more information than another player, the follower, about the play that is going to be played. We characterize the optimal amount of information that can be transmitted online by the prophet to the follower, and provide applications to repeated games played by finite automata, and by players with bounded recall.},
url = {/files/dp316.pdf},
author = {Olivier Gossner and Penelope Hernandez and Abraham Neyman}
}
@booklet {sheshinski-oarpoa2003,
title = {Optimum and Risk-Class Pricing of Annuities},
journal = {Discussion Papers},
number = {327},
year = {2003},
month = {7},
abstract = {When information on longevity (survival functions) is unknown early in life, individuals have an interest to insure themselves against future {\textquoteright}risk-class{\textquoteright} classification. Accordingly, the First-Best typically involves transfers across states of nature. Competitive equilibrium cannot provide such transfers if insurance firms are unable to precommit their customers. On the other hand, public insurance plans that do not distinguish between {\textquoteright}risk-class{\textquoteright} realizations are also inefficient. It is impossible, a-priori, to rank these alternatives from a welfare point of view.},
url = {/files/dp327.pdf},
author = {Eytan Sheshinski}
}
@booklet {sheshinski-odrc2003,
title = {Optimum Delayed Retirement Credit},
journal = {Discussion Papers},
number = {329},
year = {2003},
month = {7},
abstract = {A central question for pension design is how benefits should vary with the age of retirement beyond early eligibility age. It is often argued that in order to be neutral with respect to individual retirement decisions benefits should be actuarially fair, that is, the present value of additional contributions and benefits ({\textquoteright}Delayed Retirement Credit{\textquoteright} - DRC) due to postponed retirement should be equal. We show that in a self-selection, asymmetric information model, because individual decisions are suboptimal, the socially optimal benefit structure should be less than actuarially fair.},
url = {/files/dp329.pdf},
author = {Eytan Sheshinski}
}
@booklet {maimaran-rtrteg2003,
title = {Reducing the Reluctance to Exchange Gambles},
journal = {Discussion Papers},
number = {341},
year = {2003},
month = {10},
publisher = {A Revised Version Was Published in Judgment and Decision Making 2011, 6(2), 147-155},
abstract = {Bar-Hillel and Neter (1996) found that although people are willing to trade identical objects, they are reluctant to trade identical lottery tickets. Is this simply due to the fact that these are gambles? It was found that if the value of the tickets is guaranteed to be ex-post, not just ex ante, identical, people are more willing to exchange them. Indeed, just the possibility of ex-post difference between the lottery tickets induces as much reluctance to exchange them as when ex-post difference is guaranteed. In addition, this study examines how the vividness of lottery tickets influences the willingness to trade them. Specifically, it examines whether people are equally reluctant to exchange lottery tickets (when given a bonus for doing so) when they cannot even distinguish between them (e.g., when the tickets are concealed in envelopes). When one cannot see the ticket, it is less vivid and it is harder to imagine it winning. Indeed, it was found that people are more willing to exchange when they cannot distinguish between the tickets than when they can.In 2011, a~revised version of this paper was published under the title To trade or not to trade:~ The moderating role of vividness when exchanging gambles in Judgment and Decision Making,~6, 147-155.~ In the link todp341, it follows the~original manuscript.~},
url = {/files/dp341.pdf},
author = {Michal Maimaran}
}
@booklet {mas-colell-rcd2003,
title = {Regret-Based Continuous-Time Dynamics},
journal = {Discussion Papers},
number = {309},
year = {2003},
month = {1},
publisher = {Games and Economic Behavior 45 (2003), 375-394},
abstract = {Regret-based dynamics have been introduced and studied in the context of discrete-time repeated play. Here we carry out the corresponding analysis in continuous time. We observe that, in contrast to (smooth) fictitious play or to evolutionary models, the appropriate state space for this analysis is the space of distributions on the product of the players{\textquoteright} pure action spaces (rather than the product of their mixed action spaces). We obtain relatively simple proofs for some results known in the discrete case (related to "no-regret" and correlated equilibria), and also a new result on two-person potential games (for this result we also provide a discrete-time proof).},
url = {/files/regret.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {arianelambertmogiliansky-tiamotk2003,
title = {Type Indeterminacy: A Model of the KT(Kahneman-Tversky)-Man},
journal = {Discussion Papers},
number = {343},
year = {2003},
month = {11},
abstract = {In this note we propose to use the mathematical formalism of Quantum Mechanics to capture the idea that agents{\textquoteright} preferences, in addition to being typically uncertain, can also be indeterminate. They are determined (realized, and not merely revealed) only when the action takes place. An agent is described by a state which is a superposition of potential types (or preferences or behaviors). This superposed state is projected (or collapses) onto one of the possible behaviors at the time of the interaction. In addition to the main goal of modelling uncertainty of preferences which is not due to lack of information, this formalism seems to be adequate to describe widely observed phenomena like framing and instances of noncommutativity in patterns of behavior. We propose two experiments to test the theory.},
url = {/files/Zamir343.pdf},
author = {Ariane Lambert Mogiliansky and Shmuel Zamir and Herv{\'e} Zwirn}
}
@booklet {tombaker-tvouilaea2003,
title = {The Virtues of Uncertainty in Law: An Experimental Approach},
journal = {Discussion Papers},
number = {310},
year = {2003},
month = {2},
publisher = {Iowa Law Review 89 (2004)},
abstract = {Predictability in civil and criminal sanctions is generally understood as desirable. Conversely, unpredictability is condemned as a violation of the rule of law. This paper explores predictability in sanctioning from the point of view of efficiency. It is argued that, given a constant expected sanction, deterrence is increased when either the size of the sanction or the probability that it will be imposed is uncertain. This conclusion follows from earlier findings in behavioral decision research and the results of an experiment conducted specifically to examine this hypothesis. The findings suggest that, within an efficiency framework, there are virtues to uncertainty that may cast doubt on the premise that law should always strive to be as predictable as possible.},
url = {/files/dp310.pdf},
author = {Tom Baker and Alon Harel and Tamar Kugler}
}
@booklet {samuel-cahn-wiocd2003,
title = {Why Is One Choice Different?},
journal = {Discussion Papers},
number = {335},
year = {2003},
month = {9},
publisher = {Journal of Statistical Planning and Inference 130 (2005), 127-132},
abstract = {Let Xi be nonnegative independent random variables with finite expectations and Xn* = max X1,..., Xn. The value Xn* is what can be obtained by a "prophet". A "mortal" on the other hand, may use $k \geq 1$ stopping rules t1,...,tk yielding a return E[max i = 1,...,k X ti]. For $n \geq k$ the optimal return is Vkn (X1,...,Xn) = sup E[max i = 1,...,k X ti] where the supremum is over all stopping rules which stop by time n. The well known "prophet inequality" states that for all such Xi{\textquoteright}s and one choice EXn* < 2 V1n (X1,...,Xn) and the constant "2" cannot be improved on for any $n \geq 2$. In contrast we show that for k=2 the best constant d satisfying EXn* < d V2n (X1,...,Xn) for all such Xi{\textquoteright}s depends on n. On the way we obtain constants ck such that EXk+1* < ck Vkk+1 (X1,...,Xk+1).},
url = {/files/dp335.pdf},
author = {David Assaf and Ester Samuel-Cahn}
}
@booklet {neeman-acamda2002,
title = {Against Compromise: A Mechanism Design Approach},
journal = {Discussion Papers},
number = {290},
year = {2002},
month = {5},
publisher = {Journal of Law, Economics, and Organization 21 (2005), 285-314},
abstract = {We consider the following situation. A risk-neutral plaintiff sues a risk-neutral defendant for damages that are normalized to one. The defendant knows whether she is liable or not, but the plaintiff does not. We ask what are the settlement procedure and fee-shifting rule (which, together, we call a mechanism) that minimize the rate of litigation subject to maintaining deterrence. Two main results are presented. The first is a characterization of an upper bound on the rate of settlement that is consistent with maintaining deterrence. This upper bound is shown to be independent of the litigants{\textquoteright} litigation cost. It is further shown that any mechanism that attains this upper bound must employ the English fee-shifting rule according to which all litigation costs are shifted to the loser in trial. The second result describes a simple practicable mechanism that attains this upper bound. We discuss our results in the context of recent legal reforms in the U.S. and U.K.},
url = {/files/dp290.pdf},
author = {Alon Klement and Zvika Neeman}
}
@booklet {pitowsky-botoomabtoqp2002,
title = {Betting on the Outcomes of Measurements: A Bayesian Theory of Quantum Probability},
journal = {Discussion Papers},
number = {304},
year = {2002},
month = {11},
publisher = {Studies in History and Philosophy of Science Part B: Studies in History and Philosophy of Modern Physics 34 (2003), 395-414},
abstract = {We develop a systematic approach to quantum probability as a theory of rational betting in quantum gambles. In these games of chance the agent is betting in advance on the outcomes of several (finitely many) incompatible measurements. One of the measurements is subsequently chosen and performed and the money placed on the others is returned to the agent. If the rules of rationality are followed one obtains the peculiarities of quantum probability, the uncertainty relations and the EPR paradox among others. The consequences of this approach for hidden variables and quantum logic are analyzed.},
url = {/files/dp304.pdf},
author = {Itamar Pitowsky}
}
@booklet {peleg-ccoagfbef2002,
title = {Complete Characterization of Acceptable Game Forms by Effectivity Functions},
journal = {Discussion Papers},
number = {283},
year = {2002},
month = {1},
publisher = {Published as},
abstract = {Acceptable game forms were introduced in Hurwicz and Schmeidler (1978). Dutta (1984) considered effectivity functions of acceptable game forms. This paper unifies and extends the foregoing two papers. We obtain the following characterization of the effectivity functions of acceptable game forms: An effectivity function belongs to some acceptable game form if (i) it belongs to some Nash consistent game forms; and (ii) it satisfies an extra simple condition (our (3.1) or (4.2)). (Nash consistent game forms have already been characterized by their effectivity functions in Peleg et al. (2001).) As a corollary of our characterization we show that every acceptable game form violates minimal liberalism.},
url = {/files/dp283.pdf},
author = {Bezalel Peleg}
}
@booklet {gilula-teobcocritaactg2002,
title = {Effect of Between-Group Communication on Conflict Resolution in the Assurance and Chicken Team Games, The},
journal = {Discussion Papers},
number = {296},
year = {2002},
month = {10},
publisher = {Journal of Conflict Resolution 43 (2003), 326-339},
abstract = {We studied conflict resolution in two types of intergroup conflicts modeled as team games, a game of Assurance where the groups incentive to compete is purely fear, and a game of Chicken where the groups incentive to compete is purely greed. The games were operationalized as competitions between two groups with three players in each group. The players discussed the game with other ingroup members, after which they met with the members of the outgroup for a between group discussion, and finally had a within-group discussion before deciding individually whether to participate in their group s collective effort vis-a-vis the other group. We found that all groups playing the Assurance game managed to achieve the collectively efficient outcome of zero participation, whereas groups playing the Chicken game maintained a highly inefficient participation rate of 78\%. We conclude that communication between groups is very effective in bringing about a peaceful resolution if the conflict is motivated by mutual fear and practically useless if the conflict is motivated by mutual greed.},
url = {/files/dp296.pdf},
author = {Gary Bornstein and Zohar Gilula}
}
@booklet {zamir-oteopsmeiafa2002,
title = {On the Existence of Pure Strategy Monotone Equilibria in Asymmetric First-Price Auctions},
journal = {Discussion Papers},
number = {292},
year = {2002},
month = {6},
publisher = {Econometrica 72 (2004), 1105-1125},
abstract = {We establish the existence of pure strategy equilibria in monotone bidding functions in first-price auctions with asymmetric bidders, interdependent values and affiliated one-dimensional signals. By extending a monotonicity result due to Milgrom and Weber (1982), we show that single crossing can fail only when ties occur at winning bids or when bids are individually irrational. We avoid these problems by considering limits of ever finer finite bid sets such that no two bidders have a common serious bid, and by recalling that single crossing is needed only at individually rational bids. Two examples suggest that our results cannot be extended to multidimensional signals or to second-price auctions.},
url = {/files/dp292.pdf},
author = {Philip J. Reny and Shmuel Zamir}
}
@booklet {weiss-ffsts2002,
title = {Forecasting for Stationary Time Series},
journal = {Discussion Papers},
number = {300},
year = {2002},
month = {10},
abstract = {The forecasting problem for a stationary and ergodic binary time series Xn is to estimate the probability that Xn+1 = 1 based on the observations Xi, $0 \leq i \leq n$, without prior knowledge of the distribution of the process Xn. It is known that this is not possible if one estimates at all values of n. We present a simple procedure which will attempt to make such a prediction infinitely often at carefully selected stopping times chosen by the algorithm. We show that the proposed procedure is consistent under certain conditions, and we estimate the growth rate of the stopping times.},
url = {/files/dp300.pdf},
author = {Gusztav Morvai and Benjamin Weiss}
}
@booklet {aumann-gtb22002,
title = {Game Theory, Bilbao 2000},
journal = {Discussion Papers},
number = {293},
year = {2002},
month = {7},
publisher = {Games and Economic Behavior 45 (2003), 2-14},
abstract = {The Presidential Address at the First International Congress of the Game Theory Society, held in Bilbao, Spain, in July of 2000. The address contains a discussion of the Congress, of the functions and activities of the Society, of the Logo of the Society, of past accomplishments of the discipline, and of some future directions for research. The address is preceded by an introduction by David Kreps.},
url = {},
author = {Robert J. Aumann}
}
@booklet {garybornstein-iagditcgagmp2002,
title = {Individual and Group Decisions in the Centipede Game: Are Groups More Rational Players?},
journal = {Discussion Papers},
number = {298},
year = {2002},
month = {9},
publisher = {Journal of Experimental Social Psychology 40 (2004), 599-605},
abstract = {Two experiments compared the Centipede game played either by 2 individuals or by 2 (3-person) groups. The 2 competitors alternate in deciding whether to take the larger portion of an increasing (or constant) pile of money, and as soon as one takes the game ends. Assuming that both sides are concerned only with maximizing their own payoffs (and that this is common knowledge), the game theoretic solution, derived by backward induction, is for the first mover to exit the game at the first decision node. Both experiments found that although neither individuals nor groups fully complied with this solution, groups did exit the game significantly earlier than individuals. The study of experimental games has uncovered many instances in which individuals deviate systematically from the game theoretic solution. This study is in accord with other recent experiments in suggesting that game theory may provide a better description of group behavior.},
url = {/files/dp298.pdf},
author = {Gary Bornstein and Tamar Kugler and Anthony Ziegelmeyer}
}
@booklet {sunstein-iai2002,
title = {Inequality and Indignation},
journal = {Discussion Papers},
number = {286},
year = {2002},
month = {3},
publisher = {Philosophy and Public Affairs 30 (2002), 60-82},
abstract = {Inequalities often persist because both the advantaged and the disadvantaged stand to lose from change. Despite the probability of loss, moral indignation can lead the disadvantaged to seek to alter the status quo, by encouraging them to sacrifice their material self-interest for the sake of equality. Experimental research shows that moral indignation, understood as a willingness to suffer in order to punish unfair treatment by others, is widespread. It also indicates that a propensity to apparently self-defeating moral indignation can turn out to promote people{\textquoteright}s material self-interest, if and because others will anticipate their actions. But potential rebels face collective action problems. Some of these can be reduced through the acts of "indignation entrepreneurs," giving appropriate signals, organizing discussions by like-minded people, and engaging in acts of self-sacrifice. Law is relevant as well. By legitimating moral indignation and dissipating pluralistic ignorance, law can intensify and spread that indignation, thus increasing its expression. Alternatively, law can delegitimate moral indignation, or at least raise the cost of its expression, thus stabilizing a status quo of inequality. But the effects of law are unpredictable, in part because it will have moral authority for some but not for others; here too heterogeneity is an issue both for indignation entrepreneurs and their opponents. Examples are given from a range of areas, including labor-management relations, sexual harassment, civil rights, and domestic violence.},
url = {/files/dp286.pdf},
author = {Edna Ullmann-Margalit and Cass R. Sunstein}
}
@booklet {bornstein-icigaci2002,
title = {Intergroup Conflict: Individual, Group and Collective Interests.},
journal = {Discussion Papers},
number = {297},
year = {2002},
month = {8},
publisher = {Personality and Social Psychology Review, 7 (2003), 129-145},
abstract = {Intergroup conflicts generally involve conflicts of interests within the competing groups as well. This paper outlines a taxonomy of games, called team games, which incorporate the intragroup and intergroup levels of conflict. Its aims are to provide a coherent framework for analyzing the prototypical problems of cooperation and competition that arise within and between groups, and to review an extensive research program which has utilized this framework to study individual and group behavior in the laboratory. Depending on the game{\textquoteright}s payoff structure, contradictions or conflicts were created between the rational choices at the individual, group, and collective levels {\textendash} a generalization of the contradiction between individual and collective rationality occurring in the traditional mixed-motive games. These contradictions were studied so as to identify the theoretical and behavioral conditions that determine which level of rationality prevails.},
url = {/files/db297.pdf},
author = {Gary Bornstein}
}
@booklet {hart-lct2002,
title = {Long Cheap Talk},
journal = {Discussion Papers},
number = {284},
year = {2002},
month = {1},
publisher = {Econometrica 71 (2003), 1619-1660},
abstract = {With cheap talk, more can be achieved by long conversations than by a single message - even when one side is strictly better informed than the other.},
url = {/files/long.html},
author = {Robert J. Aumann and Sergiu Hart}
}
@booklet {yaakovkareev-otmov2002,
title = {On the Misperception of Variability},
journal = {Discussion Papers},
number = {285},
year = {2002},
month = {2},
publisher = {Journal of Experimental Psychology: General 131 (2002), 287-297},
abstract = {Ever since the days of Francis Bacon it has been claimed that people perceive the world as less variable and more regular than it actually is. Such misperception, if shown to exist, could explain a host of perplexing behaviors. However, the only evidence supporting the claim is indirect, and there is no explanation of its cause. As a possible cause, we suggest the use of sample variability as an estimate of population variability. This is so since the sampling distribution of sample variance is downward attenuated, the attenuation being substantial for sample sizes that people are likely to consider. The results of five experiments show that people use sample variability, uncorrected for sample size, in tasks in which a correction is normatively called for, and indeed perceive variability as smaller than it actually is.},
url = {/files/dp285.pdf},
author = {Yaakov Kareev and Sharon Arnon and Reut Horwitz-Zeliger}
}
@booklet {tam-mrrassp2002,
title = {Monotone Regrouping, Regression, and Simpson{\textquoteright}s Paradox},
journal = {Discussion Papers},
number = {305},
year = {2002},
month = {12},
publisher = {The American Statistician 57 (2003), 139-141},
abstract = {We show in a general setup that if data Y are grouped by a covariate X in a certain way, then under a condition of monotone regression of Y on X, a Simpson{\textquoteright}s type paradox is natural rather than surprising. This model was motivated by an observation on recent SAT data which are presented.},
url = {/files/db305.pdf},
author = {Yosef Rinott and Michael Tam}
}
@booklet {zamir-anoreoaipa2002,
title = {A Note on Revenue Effects of Asymmetry in Private-Value Auctions},
journal = {Discussion Papers},
number = {291},
year = {2002},
month = {2},
abstract = {We formulate a way to study whether the asymmetry of buyers (in the sense of having different prior probability distributions of valuations) is helpful to the seller in private-value auctions (asked first by Cantillon [2001]). In our proposed formulation, this question corresponds to two important questions previously asked: Does a first-price auction have higher revenue than a second-price auction when buyers have asymmetric distributions (asked by Maskin and Riley[2000])? And does a seller enhance revenue by releasing information (asked by Milgrom and Weber[1982])? This is shown by constructing two Harsanyi games of incomplete information each having the same ex-ante distribution of valuations but in one beliefs are symmetric while in the other beliefs are sometimes asymmetric. Our main result is that answers to all three questions coincide when values are independent and are related when values are affiliated.},
url = {/files/dp291.pdf},
author = {Todd R. Kaplan and Shmuel Zamir}
}
@booklet {motro-tpicictspsfaae2002,
title = {Parental Investment Conflict in Continuous Time: St. Peter{\textquoteright}s Fish as an Example, The},
journal = {Discussion Papers},
number = {307},
year = {2002},
month = {12},
abstract = {The parental investment conflict considers the question of how much each sex should invest in each brood, thereby characterizing different animal groups. Each such group usually adopts a certain parental care pattern: female-care only, male-care only, biparental care, or even no parental care at all. The differences in care patterns are usually explained by the different costs and benefits arising from caring for the offspring in each animal group. This paper proposes a game-theoretical model to the parental investment conflict based on the parental behavior in Cichlid fish. Cichlid fish exhibit different parental care patterns, allowing the examination of the factors which determine the particular behavior in each mating. We present a continuous time, two-stage, asymmetric game, with two types of players: male and female. According to the model{\textquoteright}s results, three parental care patterns: male-only care, female-only care and biparental care, are possible Evolutionarily Stable Strategies. Fixation depends on the investment costs and benefits, and on the initial conditions of the game. These results may explain the different parental care patterns observed in different animal groups as well as in Cichlid fish.},
url = {/files/dp307.pdf},
author = {Osnat Yaniv and Uzi Motro}
}
@booklet {rinott-aptfm2002,
title = {A Permutation Test for Matching},
journal = {Discussion Papers},
number = {301},
year = {2002},
month = {10},
publisher = {Published as "A Permutation Test for Matching and Its Asymptotic Distribution", Metron 61 (2003)},
abstract = {We consider a permutation method for testing whether observations given in their natural pairing exhibit an unusual level of similarity in situations where any two observations may be similar at some unknown baseline level. Under a hypothesis where there is no distinguished pairing of the observations, a normal approximation, with explicit bounds and rates, is presented for determining approximate critical test levels.},
url = {/files/dp301.pdf},
author = {Larry Goldstein and Yosef Rinott}
}
@booklet {brunobassan-pvoiig2002,
title = {Positive Value of Information in Games},
journal = {Discussion Papers},
number = {294},
year = {2002},
month = {8},
publisher = {International Journal of Game Theory 32 (2003), 17-31},
abstract = {We exhibit a general class of interactive decision situations in which all the agents benefit from more information. This class includes as a special case the classical comparison of statistical experiments {\`a} la Blackwell. More specifically, we consider pairs consisting of a game with incomplete information G and an information structure S such that the extended game $\Gamma$(G,S) has a unique Pareto payoff profile u. We prove that u is a Nash payoff profile of $\Gamma$(G,S), and that for any information structure that is coarser than S, all Nash payoff profiles of $\Gamma$(G,T) are dominated by u. We then prove that our condition is also necessary in the following sense: Given any convex compact polyhedron of payoff profiles, whose Pareto frontier is not a singleton, there exists an extended game $\Gamma$(G,S) with that polyhedron as the convex hull of feasible payoffs, an information structure T coarser than S and a player i who strictly prefers a Nash equilibrium in $\Gamma$(G,T) to any Nash equilibrium in $\Gamma$(G,S).},
url = {/files/dp294.pdf},
author = {Bruno Bassan and Olivier Gossner and Marco Scarsini and Shmuel Zamir}
}
@booklet {garybornstein-rpcbiabt2002,
title = {Repeated Price Competition Between Individuals and Between Teams},
journal = {Discussion Papers},
number = {303},
year = {2002},
month = {11},
publisher = {Journal of Economic Behavior and Organization (in Press)},
abstract = {We conducted an experimental study of price competition in a duopolistic market. The market was operationalized as a repeated game between two teams with one, two, or three players in each team. Each player simultaneously demanded a price, and the team whose total asking price was smaller won the competition and was paid its asked price. The losing team was paid nothing. In case of a tie, the teams split the asking price. For teams with multiple players we manipulated the way in which the team s profit was divided between the team members. In one treatment each team member was paid his or her asking price if the team won, and half that if the game was tied, while in the other treatment the team s profit for winning or tying the game was divided equally among its members. We found that asking (and winning) prices were significantly higher in competition between individuals than in competition between two- or three-person teams. There were no general effects of team size, but prices were sustained at a higher level when each team member was paid his or her own asked price than when the team s profits were divided equally.},
url = {/files/dp303.pdf},
author = {Gary Bornstein and Tamar Kugler and Reinhard Selten}
}
@booklet {aumann-raitt2002,
title = {Risk Aversion in The Talmud},
journal = {Discussion Papers},
number = {287},
year = {2002},
month = {5},
publisher = {Economic Theory 21 (2003), 233-239},
abstract = {Evidence is adduced that the sages of the ancient Babylonian Talmud, as well as some of the medieval commentators thereon, were well aware of sophisticated concepts of modern theories of risk-bearing.},
url = {/files/dp287.pdf},
author = {Robert J. Aumann}
}
@booklet {brunobassan-oscoet2002,
title = {On Stochastic Comparisons of Excess Times},
journal = {Discussion Papers},
number = {302},
year = {2002},
month = {10},
abstract = {A stationary renewal process based on iid random variables Xi is observed at a given time. The excess time, that is, the residual time until the next renewal event, is of course smaller than the total current X which consists of the residual time plus the current age. Nevertheless in certain types of data the distribution of the excess times is stochastically larger than that of Xi{\textquoteright}s. We find necessary and sufficient conditions that explain this phenomenon, and related results on stochastic orderings arising from observations on renewal processes.},
url = {/files/dp302.pdf},
author = {Bruno Bassan and Yosef Rinott and Yehuda Vardi}
}
@booklet {neyman-sgeotm2002,
title = {Stochastic Games: Existence of the MinMax},
journal = {Discussion Papers},
number = {295},
year = {2002},
month = {8},
publisher = {In Stochastic Games and Applications, A. Neyman and S. Sorin (Eds.), Kluwer Academic Press (2003)},
url = {/files/dp295.pdf},
author = {Abraham Neyman}
}
@booklet {weinberger-ottscp2002,
title = {On the Topological Social Choice Problem},
journal = {Discussion Papers},
number = {282},
year = {2002},
month = {1},
publisher = {Social Choice and Welfare 18 (2001), 227-250},
abstract = {Extending earlier work of Chichilnisky and Heal, we show that any connected space of the homotopy type of a finite complex admitting a continuous symmetric choice function respecting unanimity is contractible for any fixed finite number (>1) of agents. On the other hand, removing the finiteness condition on the homotopy type, we show that there are a number of non-contractible spaces that do admit such choice functions, for any number of agents, and characterize precisely those spaces.},
url = {/files/dp282.pdf},
author = {Shmuel Weinberger}
}
@booklet {gershonben-shakhar-tbprtuotgktic2002,
title = {Trial by Polygraph: Reconsidering the Use of the Guilty Knowledge Technique in Court},
journal = {Discussion Papers},
number = {288},
year = {2002},
month = {5},
publisher = {Law and Human Behavior 26 (2002) 527-541},
abstract = {Polygraph test results are by and large ruled inadmissible evidence in criminal courts in the US, Canada and Israel. This is well-conceived with regard to the dominant technique of polygraph interrogation, known as the Control Question Technique (CQT), because it indeed does not meet the required standards for admissible scientific evidence. However, a lesser known and rarely practiced technique, known as the Guilty Knowledge Test (GKT), is capable, if carefully administered, of meeting the recently set Daubert criteria. This article describes the technique, and argues for considering its admissibility as evidence in criminal courts.},
url = {/files/dp288.pdf},
author = {Gershon Ben-Shakhar and Maya Bar-Hillel and Mordechai Kremnitzer}
}
@booklet {ullmann-margalit-tood2002,
title = {Trust Out of Distrust},
journal = {Discussion Papers},
number = {289},
year = {2002},
month = {5},
publisher = {The Journal of Philosophy 99 (2002), 532-548},
abstract = {The paper aims to establish the possibility of trust from within a Hobbesian framework. It shows that distrust situations can be structured in two ways, the first referred to as Hard and the second as Soft, both of which are compatible with Hobbes s stark assumptions about human nature. In Hard distrust situations (which are prisoner s-dilemma structured) the distrust strategy is dominant; in the Soft variety (which are stag-hunt structured) trust is an equilibrium choice. In order to establish the possibility of trust there is no need to claim that the state of nature is Soft rather than Hard, nor even that Soft is likelier. Game theoretical considerations show that all that is needed to give trust a chance is the ambiguity or uncertainty on the part of the players as to which of the two basic situations of distrust in fact obtains: which game was picked by Nature for them to play.},
url = {/files/dp289.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {davidassaf-tcos2002,
title = {Two Choice Optimal Stopping},
journal = {Discussion Papers},
number = {306},
year = {2002},
month = {12},
publisher = {Advances of Applied Probability 36 (2004), 1116-1147},
abstract = {Let Xn, . . . ,X1 be i.i.d. random variables with distribution function F. A statistician, knowing F, observes the X values sequentially and is given two chances to choose X s using stopping rules. The statistician s goal is to stop at a value of X as small as possible. Let V2n equal the expectation of the smaller of the two values chosen by the statistician when proceeding optimally. We obtain the asymptotic behavior of the sequence V2n for a large class of F s belonging to the domain of attraction (for the minimum) D(G{\textquoteright}$\pm$), where G{\textquoteright}$\pm$(x) = [1 {\textquoteright}{\textasciicircum}{\textquoteright} exp({\textquoteright}{\textasciicircum}{\textquoteright}x{\textquoteright}$\pm$)] I(x{\textquoteright}{\textyen}0). The results are compared with those for the asymptotic behavior of the classical one choice value sequence V1n ,as well as with the {\textquoteright}prophet value sequence E(minXn, . . . ,X1).},
url = {/files/dp306.pdf},
author = {David Assaf and Larry Goldstein and Ester Samuel-Cahn}
}
@booklet {mas-colell-udcltne2002,
title = {Uncoupled Dynamics Cannot Lead to Nash Equilibrium},
journal = {Discussion Papers},
number = {299},
year = {2002},
month = {9},
publisher = {American Economic Review 93 (2003), 1830-1836},
abstract = {We call a dynamical system uncoupled if the dynamic for each player does not depend on the payoffs of the other players. We show that there are no uncoupled dynamics that are guaranteed to converge to Nash equilibrium, even when the Nash equilibrium is unique.},
url = {/files/uncoupl.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {winter-ci2001,
title = {Constitutional Implementation},
journal = {Discussion Papers},
number = {232},
year = {2001},
month = {1},
publisher = {Review of Economic Design (2002) 7, 187-204},
abstract = {We consider the problem of implementing a social choice correspondence H in Nash equilibrium when the constitution of the society is given by an effectivity function E. It is assumed that the effectivity function of H, E^H, is a sub-correspondence of E. We found necessary and sufficient conditions for a game form Gamma to implement H (in Nash equilibria) and to satisfy, at the same time, that E^Gamma, the effectivity function of Gamma, is a sub-correspondence of E^H (which guarantees that Gamma is compatible with E). We also find sufficient conditions for the coincidence of the set of winning coalitions of E^Gamma and E^H, and for E^Gamma=E^H. All our results are sharp as is shown by suitable examples.},
url = {/files/dp232.pdf},
author = {Bezalel Peleg and Eyal Winter}
}
@booklet {yaari-acmaldh2001,
title = {A Credit Market a La David Hume},
journal = {Discussion Papers},
number = {244},
year = {2001},
month = {6},
abstract = {In Book III of his Treatise of Human Nature, David Hume considers the following simple interaction: "I suppose a person to have lent me a sum of money, on condition that it be restor{\textquoteright}d in a few days, and also suppose, that after the expiration of the term agreed on, he demands the sum" and Hume asks: "What reason or motive have I to restore the money?" [1740, p. 479] The answer, he concludes, must be "that the sense of justice and injustice [which is the motive for repaying the loan] is not deriv{\textquoteright}d from nature, but arises artificially, tho{\textquoteright} necessarily, from education and human conventions." [p. 483] It is my purpose in this essay to offer formal (and modern) underpinnings for Hume{\textquoteright}s argument. I shall do so in the context of Hume{\textquoteright}s own example, cited above, where the interaction being considered is one between lender and borrower.},
url = {/files/dp244.pdf},
author = {Menahem E. Yaari}
}
@booklet {pradeepdubey-dapige2001,
title = {Default and Punishment in General Equilibrium},
journal = {Discussion Papers},
number = {241},
year = {2001},
month = {5},
abstract = {We extend the standard model of general equilibrium with incomplete markets to allow for default and punishment. The equilibrating variables include expected delivery rates, along with the usual prices of assets and commodities. By reinterpreting the variables, our model encompasses a broad range of moral hazard, adverse-selection, and signalling phenomena (including the Akerloflemons model and Rothschild-Stiglitz insurance model) in a general equilibrium framework. We impose a condition on the expected delivery rates for untraded assets that is similar to the trembling hand refinements used in game theory. Despite earlier claims about the nonexistence of equilibrium with adverse selection, we show that equilibrium always exists, even with exclusivity constraints on asset sales, and transactions-liquidity costs or information-evaluation costs for asset trade. We show that more lenient punishment which encourages default may be Pareto improving because it allows for better risk spreading. We also show that default opens the door to a theory of endogenous assets.},
url = {/files/dp241.pdf},
author = {Pradeep Dubey and John Geanakoplos and Martin Shubik}
}
@booklet {sudholter-tdpotbs2001,
title = {Dummy Paradox of the Bargaining Set, The},
journal = {Discussion Papers},
number = {256},
year = {2001},
month = {6},
publisher = {International Journal of Mathematics, Game Theory and Algebra, 12 (2002), 443-446. Also in L.A. Petrosjan \& V.V. Mazalov (eds.) Game Theory and Applications, Vol. 8, Nova Science Publishers, New York (2002),119-124.},
abstract = {By means of an example of a superadditive 0-normalled game, we show that the maximum payoff to a dummy in the bargaining set may decrease when the marginal contribution of the dummy to the grand coalition becomes positive.},
url = {/files/dp256.pdf},
author = {Bezalel Peleg and Peter Sudholter}
}
@booklet {jacob-aerftltooitl2001,
title = {An Economic Rationale for the Legal Treatment of Omissions in Tort Law},
journal = {Discussion Papers},
number = {281},
year = {2001},
month = {12},
publisher = {Theoretical Inquiries in Law 3 (2002).},
abstract = {This paper provides an economic justification for the exemption from liability for omissions and for the exceptions to this exemption. It interprets the differential treatment of acts and omissions in tort law as a proxy for a more fundamental distinction between harms caused by multiple injurers each of whom can single-handedly prevent the harm (either by acting or failing to act) and harms caused by a single injurer (either by acting or failing to act). Since the overall cost to which a group of injurers is exposed is constant, attributing liability to many injurers reduces the part each has to pay and consequently reduces one{\textquoteright}s incentives to take precautions. The broad exemption from liability for omissions is a way of carving a simple, practical rule to distinguish between the typical cases in which an agent can be easily selected and provided with sufficient incentives (typically, cases of acts) and cases in which there is a serious problem of dilution of liability (typically, cases of omissions). The exceptions to the rule exempting from responsibility for omissions are also explained in terms of efficiency. The imposition of liability for omissions depends on the ability to identify a salient agent, i.e., to single out one or few legally responsible agents and differentiate their role from that of others. Tort law designs three types of "salience rules." It either creates salience directly (by attributing liability to a single agent), or it can exploit salience created "naturally", or it can induce injurers to create salience voluntarily.},
url = {/files/dp281.pdf},
author = {Alon Harel and Assaf Jacob}
}
@booklet {goren-teoocoibaopitipdg2001,
title = {Effect of Out-Group Competition on Individual Behavior and Out-Group Perception in the Intergroup Prisoner{\textquoteright}s Dilemma (IPD) Game, The},
journal = {Discussion Papers},
number = {271},
year = {2001},
month = {9},
publisher = {Group Processes and Intergroup Relations 4 (2001), 160-182.},
abstract = {Hebrew University of Jerusalem students participated in two experiments of repeated play of the Intergroup Prisoners{\textquoteright} Dilemma (IPD) game, which involves conflict of interests between two groups and, simultaneously, within each group. The experiments manipulated the level of competition exhibited by the out-group members (i.e., their level of contribution to their group{\textquoteright}s effort in the conflict). Consistent with the hypothesis that participants use strategies of reciprocal cooperation between groups, higher levels of out-group competition caused participants to increase their contribution and lower levels caused them to decrease it. In addition, participants had accurate recall of the contribution levels of out-group members, and they attributed motivations to out-group members in a manner that reflected their level of contribution. The nature of reciprocation with the out-group is discussed in light of both behavioral and cognitive data.},
url = {/files/dp271.doc},
author = {Harel Goren}
}
@booklet {haimanko-eatoot2001,
title = {Envy and the Optimality of Tournaments},
journal = {Discussion Papers},
number = {250},
year = {2001},
month = {6},
abstract = {We show that tournaments tend to outperform piece-rate contracts when there is sufficient envy among the agents.},
author = {Pradeep Dubey and Ori Haimanko}
}
@booklet {bracht-eolmoegd2001,
title = {Estimation of Learning Models on Experimental Game Data},
journal = {Discussion Papers},
number = {243},
year = {2001},
month = {6},
abstract = {The objective of this paper is both to examine the performance and to show properties of statistical techniques used to estimate learning models on experimental game data. We consider a game with unique mixed strategy equilibrium. We discuss identification of a general learning model and its special cases, reinforcement and belief learning, and propose a parameterization of the model. We conduct Monte Carlo simulations to evaluate the finite sample performance of two kinds of estimators of a learning model{\textquoteright}s parameters. Maximum likelihood estimators of period to period transitions and mean squared deviation estimators of the entire path of play. In addition, we investigate the performance of a log score estimator of the entire path of play and a mean squared deviation estimator of period to period transitions. Finally, we evaluate a mean squared estimator of the entire path of play with observed actions averaged over blocks, instead of behavioral strategies. We propose to estimate the learning model by maximum likelihood estimation as this method performs well on the sample size used in practice if enough cross sectional variation is observed.},
url = {/files/dp243.pdf},
author = {Hidehiko Ichimura and Juergen Bracht}
}
@booklet {itzhakvenezia-eviaasea2001,
title = {Exclusive Vs. Independent Agents: A Separating Equilibrium Approach},
journal = {Discussion Papers},
number = {237},
year = {2001},
month = {2},
publisher = {Journal of Economic Behavior and Organization 40 (1999), 443-456.},
abstract = {We provide a separating equilibrium explanation for the existence of the independent insurance agent system despite the potentially higher costs of this system compared to those of the exclusive agents system (or direct underwriting). A model is developed assuming asymmetric information between insurers and insureds; the formers do not know the riskiness of the latter. We also assume that the claims service provided by the independent agent system to its clients is superior to that offered by direct underwriting system, that is, insureds using the independent agent system are more likely to receive reimbursement of their claims. Competition compels the insurers to provide within their own system the best contract to the insured. It is shown that in equilibrium the safer insureds choose direct underwriting, whereas the riskier ones choose independent agents. The predictions of the model agree with previous research demonstrating that the independent agent system is costlier than direct underwriting. The present model suggests that this does not result from inefficiency but rather from self-selection. The empirical implication of this analysis is that, ceteris paribus, the incidence of claims made by clients of the independent agents system is higher than that of clients of direct underwriting. Implications for the co-existence of different distribution systems due to unbundling of services in other industries such as brokerage houses and the health care industry are discussed.},
url = {/files/dp237.doc},
author = {Itzhak Venezia and Dan Galai and Zur Shapira}
}
@booklet {klausabbink-tfpettbcanciasg2001,
title = {Fisherman{\textquoteright}s Problem: Exploring the Tension Between Cooperative and Non-Cooperative Concepts in a Simple Game, The},
journal = {Discussion Papers},
number = {238},
year = {2001},
month = {2},
publisher = {Journal of Economic Psychology 24 (2003), 425-445},
abstract = {We introduce and experiment the Fisherman{\textquoteright}s Game in which the application of economic theory leads to four different benchmarks. Non-cooperative sequential rationality predicts one extreme outcome while the core (which coincides with the competitive market equilibrium) predicts the other extreme. Intermediate, disjoint outcomes are predicted by fairness utility models and the Shapley value. None of the four benchmarks fully explains the observed behavior. However, since elements of both cooperative and non-cooperative game theory are crucial for organizing our data, we conclude that effort towards bridging the gap between the various concepts is a promising approach for future economic research.},
url = {/files/dp238.pdf},
author = {Klaus Abbink and Ron Darziv and Zohar Gilula and Harel Goren and Bernd Irlenbusch and Arnon Keren and Bettina Rockenbach and Abdolkarim Sadrieh and Reinhard Selten and Shmuel Zamir}
}
@booklet {kalai-afpftcpaat2001,
title = {A Fourier-Theoretic Perspective for the Condorcet Paradox and Arrow{\textquoteright}s Theorem},
journal = {Discussion Papers},
number = {280},
year = {2001},
month = {11},
publisher = {Advances in Applied Mathematics 29 (2002), 412-426},
abstract = {We describe a Fourier-theoretic formula for the probability of rational outcomes for random profiles for a social choice function on three alternatives. Several applications are given.},
url = {/files/dp280.pdf},
author = {Gil Kalai}
}
@booklet {simon-goiietatmobe2001,
title = {Games of Incomplete Information, Ergodic Theory, and the Measurability of Bayesian Equilibria},
journal = {Discussion Papers},
number = {254},
year = {2001},
month = {6},
abstract = {This paper discusses the difference between Harsanyi and Bayesian equilibria for games of incomplete information played on uncountable belief spaces. A conjecture belonging to ergodic theory is presented. If the conjecture were valid then there would exist a game played on an uncountable belief space with a common prior for which there are Bayesian equilibria but no Harsanyi equilibrium.},
url = {/files/dp254.pdf},
author = {Robert Samuel Simon}
}
@booklet {yigalattali-gwtpocaimtiaapv2001,
title = {Guess Where: The Position of Correct Answers in Multiple-Choice Test Items as a Psychometric Variable},
journal = {Discussion Papers},
number = {251},
year = {2001},
month = {6},
publisher = {Journal of Educational Measurement, 40 (2003), 109-128},
abstract = {In this paper, we show that test makers and test takers have a strong and systematic tendency for hiding correct answers {\textendash} or, respectively, for seeking them {\textendash} in middle positions. In single, isolated questions, both prefer middle positions over extreme ones in a ratio of up to 3 or 4 to 1. Because test makers routinely, deliberately and excessively balance the answer key of operational tests, middle bias almost, though not quite, disappears in those keys. Examinees taking real tests also produce answer sequences that are more balanced than their single question tendencies, but to a lesser extent than the correct key. In a typical 4-choice test, about 55\% of erroneous answers (which are the only answers whose position is determined by the test taker, not the test maker) are in the two central positions. We show that this bias is large enough to have real psychometric consequences, as questions with middle correct answers are easier and {\textendash} what{\textquoteright}s more important {\textendash} less discriminating than questions with extreme correct answers, a fact some of whose implications we explore.},
url = {/files/dp251.pdf},
author = {Yigal Attali and Maya Bar-Hillel}
}
@booklet {heifetz-ii2001,
title = {Incomplete Information},
journal = {Discussion Papers},
number = {248},
year = {2001},
month = {6},
publisher = {Handbook of Game Theory, with Economic Applications, Vol. III, R. J. Aumann and S. Hart (eds.), Elsevier North-Holland (2002).},
abstract = {In interactive contexts such as games and economies, it is important to take account not only of what the players believe about substantive matters (such as payoffs), but also of what they believe about the beliefs of other players. Two different but equivalent ways of dealing with this matter, the semantic and the syntactic, are set forth. Canonical and universal semantic systems are then defined and constructed, and the concepts of common knowledge and common priors formulated and characterized. The last two sections discuss relations with Bayesian games of incomplete information and their applications, and with interactive epistemology - the theory of multi-agent knowledge and belief as formulated in mathematical logic.},
url = {/files/dp248.pdf},
author = {Robert J. Aumann and Aviad Heifetz}
}
@booklet {bornstein-tipdgaamoic2001,
title = {Intergroup Prisoner{\textquoteright}s Dilemma Game as a Model of Intergroup Conflict, The},
journal = {Discussion Papers},
number = {270},
year = {2001},
month = {9},
publisher = {L. Backman \& C. von Hofsten (Eds.) Psychology at the Turn of the Millenium: Social, Developmental and Clinical Perspectives.},
abstract = {Intergroup conflicts are characterized by conflicts of interests within the competing groups as well. The intragroup conflict stems from a basic fact: while all group members are better off if they all cooperate in competing against the outgroup, each individual group member is better off defecting. The Intergroup Prisoner{\textquoteright}s Dilemma (IPD) game is proposed as a theoretical framework for combining the intragroup and intergroup levels of conflict. This framework is used to examine major issues concerning individual and group behavior in intergroup conflict. These include: the effect of real intergroup conflict on intragroup cooperation; the motivational basis of cooperation; the distinction between non-cooperative groups, unitary groups, and individuals; and alternative routes to conflict resolution.},
author = {Gary Bornstein}
}
@booklet {kalai-laroc2001,
title = {Learnability and Rationality of Choice},
journal = {Discussion Papers},
number = {261},
year = {2001},
month = {8},
publisher = {Journal of Economic Theory 113 (2003), 104-117},
abstract = {The purpose of this paper is to examine the extent to which the concepts of individual and collective choice used in economic theory describe "predictable" or "learnable" behavior. Given a set X of N alternatives, a choice function c is a mapping which assigns to nonempty subsets S of X an element c(S) of S. A rational choice function is one for which there is a linear ordering on the alternatives such that c(S) is the maximal element of S according to that ordering. Using the basic concept of PAC-learnability from statistical learning theory we define a class of choice functions on a ground set of N elements as learnable if it is possible to predict, with small amount of error, the chosen element from a set A after viewing a "few examples." Here, "few" means a polynomial number in N. Learnability is quite a strict condition on a class of choice functions. The main points we discuss in this regard are: The class of rational choice function can be learned quickly and efficiently. Various natural classes of choice functions, which represent individual choices and strategic choices of several interacting agents, are learnable. The class of rational choice functions has superior learnability properties in comparison to other classes. We make the conjecture that classes of choice functions that represent a genuine aggregation of individual choices in a large society are never learnable. We also ask to what extent learnability can replace or reinforce the rationality hypothesis in some economic situations.},
url = {/files/dp261.pdf},
author = {Gil Kalai}
}
@booklet {samuel-cahn-lcop2001,
title = {Lewis Carroll{\textquoteright}s Obtuse Problem},
journal = {Discussion Papers},
number = {235},
year = {2001},
month = {1},
publisher = {Teaching Statistics 23 (2001), 72-75},
abstract = {Carroll{\textquoteright}s apparently impeccable solution to one of his probability problems is shown to answer another problem that is based on reasonable assumptions. His original assumptions, however, are self-contradictory, hence entailing paradoxical results.},
url = {/files/dp235.doc},
author = {Ruma Falk and Ester Samuel-Cahn}
}
@booklet {simon-lfks2001,
title = {Locally Finite Knowledge Structures},
journal = {Discussion Papers},
number = {275},
year = {2001},
month = {9},
abstract = {With respect to the S5 multi-agent epistemic logic, we define a cell to be a minimal subset of knowledge structures known in common semantically by all the agents. A cell has finite fanout if at every knowledge structure every agent considers only a finite number of other knowledge structures to be possible. A set of formulas in common knowledge is finitely generated if the common knowledge of some finite subset implies the common knowledge of the whole set. For every finitely generated set of formulas held in common knowledge at some knowledge structure either this set determines uniquely a finite cell or there are uncountably many cells of finite fanout (and also uncountably many cells of uncountable size) at which exactly this set of formulas is known in common. The situation is very different, however, for sets of formulas held in common knowledge that are not finitely generated - if there are uncountably many corresponding cells then either none of these cells or all of them could have finite fanout.},
url = {/files/dp275.pdf},
author = {Robert Samuel Simon}
}
@booklet {vulkan-mvnteocm2001,
title = {Markets Versus Negotiations: the Emergence of Centralized Markets},
journal = {Discussion Papers},
number = {239},
year = {2001},
month = {2},
abstract = {We study the incentives of privately informed traders who have access to two forms of trade: direct negotiations with a small number of buyers and sellers (or decentralized trade), and centralized markets with a relatively large number of buyers and sellers. We show that "weak" trader types (that is, buyers with a high willingness to pay and sellers with low costs) will prefer to trade through centralized markets. This leads to a complete unraveling of direct negotiations, so that ultimately, all "serious" buyers and sellers opt for trading through the centralized market. Once this happens, no trader can profitably trade through direct negotiations.},
url = {/files/db239.pdf},
author = {Neeman, Zvika and Vulkan, Nir}
}
@booklet {bezalelpeleg-ncrocarttgp2001,
title = {Nash Consistent Representation of Constitutions: A Reaction to the Gibbard Paradox},
journal = {Discussion Papers},
number = {258},
year = {2001},
month = {7},
publisher = {Mathematical Social Sciences 43 (2002), 267-287},
abstract = {The concept of an effectivity function is adopted as a formal model of a constitution. A game form models the actions available and permissible to individuals in a society. As a representation of the constitution such a game form should endow each group in society with the same power as it has under the constitution. Another desirable property is Nash consistency of the game form: Whatever the individual preferences, the resulting game should be minimally stable in the sense of possessing a Nash equilibrium. A first main result of the paper is a characterization of all effectivity functions that have a Nash consistent representation for the case without special structure on the set of alternatives (social states). Next, a similar result is derived for the case where the set of alternatives is a compact metric space and the effectivity function is topological. As a special case, veto functions are considered. Further results concern Pareto optimality of Nash equilibrium outcomes.},
url = {/files/db258.pdf},
author = {Bezalel Peleg, Hans Peters and Ton Storcken}
}
@booklet {peleg-anoaaotcomg2001,
title = {A Note on an Axiomatization of the Core of Market Games},
journal = {Discussion Papers},
number = {240},
year = {2001},
month = {5},
publisher = {Mathematics of Operations Research 27 (2002), 441-444},
abstract = {As shown by Peleg, the core of market games is characterized by nonemptiness, individual rationality, superadditivity, the weak reduced game property, the converse reduced game property, and weak symmetry. It was not known whether weak symmetry was logically independent. With the help of a certain transitive 4-person TU game it is shown that weak symmetry is redundant in this result. Hence the core on market games is axiomatized by the remaining five properties, if the universe of players contains at least four members.},
url = {/files/dp240.pdf},
author = {Peleg, Bezalel and Sudholter, Peter}
}
@booklet {judithavrahami-tpgetaonitfoc2001,
title = {Parasite Game: Exploiting the Abundance of Nature in the Face of Competition, The},
journal = {Discussion Papers},
number = {245},
year = {2001},
month = {6},
publisher = {Published as "Games of Competition in a Stochastic Environment", Theory and Decision 59 (2005), 255-294},
abstract = {A situation in which the regularity in nature can be utilized while competition is to be avoided is modeled by the Parasite game. In this game regular behavior could enhance guessing nature but strategic randomization is required to avoid being outguessed. In an experiment, 60 pairs of participants (partner design) played many rounds of the Parasite game. The treatments differed in nature{\textquoteright}s probabilities and whether or not these probabilities were announced in advance or could only be experienced over time. Before playing, the working memory (WM) of participants was measured. Data analyses test the correspondence of participants{\textquoteright} behavior to game-theoretic benchmarks and the effect of participants{\textquoteright} WM on their behavior.},
url = {/files/dp245.pdf},
author = {Avrahami, Judith and Guth, Werner and Kareev, Yaakov}
}
@booklet {venezia-pobopmaii2001,
title = {Patterns of Behavior of Professionally Managed and Independent Investors},
journal = {Discussion Papers},
number = {253},
year = {2001},
month = {6},
publisher = {Journal of Banking and Finance 25 (2001), 1573-1587.},
abstract = {In this paper, we analyze the investment patterns of a large number of clients of a major Israeli brokerage house during 1994. We compare the behavior of clients making independent investment decisions to that of investors whose accounts were managed by brokerage professionals. Our main objective is to investigate whether the disposition effect (i.e., the tendency to sell winners quicker than losers), demonstrated in the US only for individual investors, also holds for professional investors. This analysis is important, as accepted financial theory predicts that prices are determined mainly by decisions made by professionals. We show that both professional and independent investors exhibit the disposition effect, although the effect is stronger for independent investors. The second objective of our study is the comparison of trade frequency, volume and profitability between independent and professionally managed accounts. We believe that these comparisons not only provide insights of their own, but also help to put the differences in the disposition effect in a wider perspective. We demonstrate that professionally managed accounts were more diversified and that round trips were both less correlated with the market and slightly more profitable than those of independent accounts.},
url = {/files/dp253.doc},
author = {Shapira, Zur and Venezia, Itzhak}
}
@booklet {gooniorshan-tpcoacg2001,
title = {Positive Core of a Cooperative Game, The},
journal = {Discussion Papers},
number = {268},
year = {2001},
month = {8},
publisher = {International Journal of Game Theory 39 (2010), 113-136},
abstract = {The positive core is a nonempty extension of the core of transferable utility games. If the core is nonempty, then it coincides with the core. It shares many properties with the core. Six well-known axioms which are employed in some axiomatizations of the core, the prenucleolus, or the positive prekernel, and one new intuitive axiom, characterize the positive core on any infinite universe of players. This new axiom requires that the solution of a game, whenever it is nonempty, contains an element which is invariant under any symmetry of the game.},
url = {/files/dp_268.pdf},
author = {Orshan, Gooni and Sudholter, Peter}
}
@booklet {albertblarer-pmftfofspbfb2001,
title = {Possible Mechanisms for the Formation of Flower Size Preferences by Foraging Bumblebees},
journal = {Discussion Papers},
number = {233},
year = {2001},
month = {1},
publisher = {Journal of Ethology 108 (2003), 341-351},
abstract = {Large flowers often contain larger nectar rewards, and receive more pollinator visits, than small flowers. We studied possible behavioral mechanisms underlying the formation of flower size preferences in bumblebees, using a two-phase laboratory experiment. Experimentally naive Bombus terrestris (L.) foraged on artificial flowers that bore either a big (3.8cm diameter) or a small (2.7cm diameter) display of a uniform color. Only flowers of one display size contained nectar rewards. We changed the display color and the locations of big and small flowers in the second experiment phase. We recorded the bees{\textquoteright} choices in both phases. Almost one half of the bees (41) made their first visit to a small flower. The bees learned to associate display size with food reward, and chose rewarding flowers with >85 accuracy by the end of each experimental phase. Some learning occurred within the bees{\textquoteright} first three flower visits. Learning of the size-reward association was equally good for big and small displays in the first experimental phase, but better for small displays in the second phase. Formation of size-reward associations followed a similar course in both phases. This suggests that the bees did not apply their experience from the first learning phase to the new situation of the second phase. Rather, they treated each phase of the experiment as an independent learning task. Our results suggest that associative learning is involved in the formation of preferences for large displays by bees. Moreover, bees that had learned to prefer large displays in one foraging situation may not transfer this preference to a novel situation that is sufficiently different. We propose that this feature of the bees{\textquoteright} behavior can select for honest advertising in flowers.},
author = {Blarer, Albert and Keasar, Tamar and Shmida, Avi}
}
@booklet {cassrsunstein-pij2001,
title = {Predictably Incoherent Judgements},
journal = {Discussion Papers},
number = {273},
year = {2001},
month = {9},
publisher = {Stanford Law Review 54 (2002), 1153-1216},
abstract = {When people make moral or legal judgments in isolation, they produce a pattern of outcomes that they would themselves reject, if only they could see that pattern as a whole. A major reason is that human thinking is category-bound. When people see a case in isolation, they spontaneously compare it to other cases that are mainly drawn from the same category of harms. When people are required to compare cases that involve different kinds of harms, judgments that appear sensible when the problems are considered separately often appear incoherent and arbitrary in the broader context. Another major source of incoherence is what we call the translation problem: The translation of moral judgments into the relevant metrics of dollars and years is not grounded in either principle or intuition, and produces large differences among people.. The incoherence produced by category-bound thinking is illustrated by an experimental study of punitive damages and contingent valuation. We also show how category-bound thinking and the translation problem combine to produce anomalies in administrative penalties. The underlying phenomena have large implications for many topics in law, including jury behavior, the valuation of public goods, punitive damages, criminal sentencing, and civil fines. We consider institutional reforms that might overcome the problem of predictably incoherent judgments. Connections are also drawn to several issues in legal theory, including valuation of life, incommensurability, and the aspiration to global coherence in adjudication.},
url = {/files/dp273.doc},
author = {Sunstein, Cass R. and Kahneman, Daniel and Schkade, David and Ritov, Ilana}
}
@booklet {davidassaf-rpiwtmhsc2001,
title = {Ratio Prophet Inequalities When the Mortal Has Several Choices},
journal = {Discussion Papers},
number = {236},
year = {2001},
month = {2},
publisher = {Annals of Applied Probability 12 (2002) 972-984.},
abstract = {Let X_i be non-negative, independent random variables with finite expectation, and X*_n=maxX_1,...,X_n. The value EX*_n is what can be obtained by a "prophet". A "mortal" on the other hand, may use k>=1 stopping rules t_1,...,t_k, yielding a return of E[max_i=1,...,k X_t_i]. For n>=k the optimal return is V^n_k(X_1,...,X_n)=supE[max_i=1,...,k X_t_i] where the supremum is over all stopping rules t_1,...t_k such that P(t_i},
url = {/files/dp236.pdf},
author = {Assaf, David and Goldstein, Larry and Samuel-Cahn, Ester}
}
@booklet {aumann-trfm2001,
title = {Rationale for Measurability, The},
journal = {Discussion Papers},
number = {260},
year = {2001},
month = {7},
publisher = {In G. Debreu, W. Neuefeind \& W. Trockel (eds.) Economics Essays, A Festschrift for Werner Hildenbrand Springer, Berlin (2001), 5-7},
abstract = {When modelling large economies by nonatomic measure spaces of agents, one defines "coalitions" as measurable - not arbitrary - sets of agents. Here we suggest a rationale for this restriction: "Real" economies have finitely many agents. In them, coalitions are associated with various measures, like total endowment, which play a vital role in the analysis. So in the model, too, one should be able to associate similar measures with coalitions; this means that they must be "measurable." Thus, though in the finite case a coalition is simply an arbitrary set of players, the appropriate generalization to the infinite case is not an arbitrary but a measurable set.},
url = {/files/dp260.pdf},
author = {Robert J. Aumann}
}
@booklet {gilkalai-rcfbmr2001,
title = {Rationalizing Choice Functions by Multiple Rationales},
journal = {Discussion Papers},
number = {278},
year = {2001},
month = {11},
publisher = {Econometrica 70 (2002), 2481-2488.},
abstract = {The paper presents a notion of rationalizing choice functions that violate the Independence of Irrelevant Alternatives axiom. A collection of linear orderings is said to provide a rationalization by multiple rationales for a choice function if the choice from any choice set can be rationalized by one of the orderings. We characterize a tight upper bound on the minimal number of orderings that is required to rationalize arbitrary choice functions, and calculate the minimal number for several specific choice procedures.},
url = {/files/dp278.pdf},
author = {Kalai, Gil and Rubinstein, Ariel and Spiegler, Ran}
}
@booklet {neyman-ratisg2001,
title = {Real Algebraic Tools in Stochastic Games},
journal = {Discussion Papers},
number = {272},
year = {2001},
month = {9},
publisher = {In Stochastic Games and Applications, A. Neyman and S. Sorin (Eds.), Kluwer Academic Press (2003)},
abstract = {The present chapter brings together parts of the theory of polynomial equalities and inequalities used in the theory of stochastic games. The theory can be considered as a theory of polynomial equalities and inequalities over the field of real numbers or the field of real algebraic numbers or more generally over an arbitrary real closed field.},
url = {/files/dp272.pdf},
author = {Abraham Neyman}
}
@booklet {sudholter-rtp2001,
title = {Reconfirming the Prenucleolus},
journal = {Discussion Papers},
number = {267},
year = {2001},
month = {8},
publisher = {Mathematics of Operations Research 28 (2003), 283-293},
abstract = {By means of an example it is shown that the prenucleolus is not the only minimal solution that satisfies nonemptiness, Pareto optimality, covariance, the equal treatment property and the reduced game property, even if universe of players is infinite. This example also disproves a conjecture of Gurvich et al. Moreover, we prove that the prenucleolus is axiomatized by nonemptiness, covariance, the equal treatment property, and the reconfirmation property, provided the universe of players is infinite.},
url = {/files/dp267.pdf},
author = {Orshan, Gooni and Sudholter, Peter}
}
@booklet {winter-saoaor2001,
title = {Scapegoats and Optimal Allocation of Responsibility},
journal = {Discussion Papers},
number = {266},
year = {2001},
month = {8},
abstract = {We consider a model of hierarchical organizations in which agents have the option of reducing the probability of failure by investing towards their decisions. A mechanism specifies a distribution of sanctions in case of failure across the levels of the hierarchy. It is said to be investment-inducing if it induces all agents to invest in equilibrium. It is said to be optimal if it does so at minimal total punishment. We characterize optimal investment-inducing mechanisms in several versions of our benchmark model. In particular we refer to the problem of allocating individuals with diverse qualifications to different levels of the hierarchy as well as allocating tasks of different importance across different hierarchy levels. We also address the issue of incentive-optimal hierarchy architectures.},
url = {/files/dp266.pdf},
author = {Winter, Eyal}
}
@booklet {mayabar-hillel-swasatcikmt2001,
title = {Seek Whence: Answer Sequences and Their Consequences in Key-Balanced Multiple-Choice Tests},
journal = {Discussion Papers},
number = {252},
year = {2001},
month = {6},
publisher = {The American Statistician 56 (2002), 299-303},
abstract = {The professional producers of such wide-spread high-stakes tests as the SAT have a policy of balancing, rather than randomizing, the answer keys of their tests. Randomization yields answer keys that are, on average, balanced, whereas a policy of deliberate balancing assures this desirable feature not just on average, but in every test. This policy is a well-kept trade secret, and apparently has been successfully kept as such, since there is no evidence of any awareness on the part of test takers and the coaches that serve them that this is an exploitable feature of answer keys. However, balancing leaves an identifiable signature on answer keys, thus not only jeopardizing the secret, but also creating the opportunity for its exploitation. The present paper presents the evidence for key balancing, the traces this practice leaves in answer keys, and the ways in which testwise test takers can exploit them. We estimate that such test takers can add between 10 and 16 points to their final SAT score, on average, depending on their knowledge level. The secret now being out of the closet, the time has come for test makers to do the right thing, namely to randomize, not balance, their answer keys. Following the link to the published version of dp252, an earlier, but fuller, version is included.},
url = {/files/dp252.pdf},
author = {Bar-Hillel, Maya and Attali, Yigal}
}
@booklet {geanakoplos-sadrr2001,
title = {Signalling and Default: Rothschild-Stiglitz Reconsidered},
journal = {Discussion Papers},
number = {242},
year = {2001},
month = {5},
publisher = {The Quarterly Journal of Economics 117 (2002), 1529-1570.},
abstract = {In our previous paper we built a general equilibrium model of default and punishment in which equilibrium always exists and endogenously determines asset promises, penalties, and sales constraints. In this paper we interpret the endogenous sales constraints as equilibrium signals. By specializing the default penalties and imposing an exclusivity constraint on asset sales, we obtain a perfectly competitive version of the Rothschild-Stiglitz model of insurance. In our model their separating equilibrium always exists even when they say it doesn{\textquoteright}t.},
url = {/files/dp242.pdf},
author = {Dubey, Pradeep and Geanakoplos, John}
}
@booklet {neyman-sgib2001,
title = {Singular Games in Bv{\textquoteright}NA},
journal = {Discussion Papers},
number = {262},
year = {2001},
month = {8},
abstract = {Every simple monotonic game in bv{\textquoteright}NA is a weighted majority game. Every game v in bv{\textquoteright}NA has a representation v=u+sum_i=1^inftyf_i o mu_i where u in pNA, mu_i in NA^1 and f_i is a sequence of bv{\textquoteright} functions with sum_i=1^infty||f_i||},
url = {/files/dp262.pdf},
author = {Abraham Neyman}
}
@booklet {kalai-scatp2001,
title = {Social Choice and Threshold Phenomena},
journal = {Discussion Papers},
number = {279},
year = {2001},
month = {11},
abstract = {Arrow{\textquoteright}s theorem asserts that under certain conditions every non-dictatorial social choice function leads to nonrational social choice for some profiles. In other words, for the case of non-dictatorial social choice if we observe that the society prefers alternative A over B and alternative B over C we cannot deduce what its choice will be between A and C. Here we ask whether we can deduce anything from observing a sample of the society{\textquoteright}s choices on the society{\textquoteright}s choice in other cases? We prove that the answer is {\textquoteleft}{\textquoteleft}no{\textquoteright}{\textquoteright} for large societies for neutral and monotonic social choice functions such that the society{\textquoteright}s choice is not typically determined by the choices of a few individuals. The proof is based on threshold properties of Boolean functions and on analysis of the social choice under some probabilistic assumptions on the profiles. A similar argument shows that under the same conditions for the social choice function but under certain other probabilistic assumptions on the profiles the social choice function will typically lead to rational choice for the society.},
url = {/files/dp279.pdf},
author = {Gil Kalai}
}
@booklet {winter-sasigf2001,
title = {Stability and Segregation in Group Formation},
journal = {Discussion Papers},
number = {263},
year = {2001},
month = {8},
publisher = {Games and Economic Behavior 38 (2002), 318-346.},
abstract = {This paper presents a model of group formation based on the assumption that individuals prefer to associate with people similar to them. It is shown that, in general, if the number of groups that can be formed is bounded, then a stable partition of the society into groups may not exist. A partition is defined as stable if none of the individuals would prefer be in a different group than the one he is in. However, if individuals characteristics are one-dimensional, then a stable partition always exists. We give sufficient conditions for stable partitions to be segregating (in the sense that, for example, low-characteristic individuals are in one group and high-characteristic ones are in another) and Pareto efficient. In addition, we propose a dynamic model of individual myopic behavior describing the evolution of group formation to an eventual stable, segregating, and Pareto efficient partition.},
url = {/files/dp263.pdf},
author = {Milchtaich, Igal and Winter, Eyal}
}
@booklet {peleg-svpfciee2001,
title = {Stable Voting Procedures for Committees in Economic Environments},
journal = {Discussion Papers},
number = {246},
year = {2001},
month = {6},
publisher = {Journal of Mathematical Economics 30 (2001), 117-140},
abstract = {A strong representation of a committee, formalized as a simple game, on a convex and closed set of alternatives is a game form with the members of the committee as players such that (i) the winning coalitions of the simple game are exactly those coalitions, which can get any given alternative independent of the strategies of the complement, and (ii) for any profile of continuous and convex preferences, the resulting game has a strong Nash equilibrium. In the paper, it is investigated whether committees have representations on convex and compact subsets of R^m. This is shown to be the case if there are vetoers; for committees with no vetoers the existence of strong representations depends on the structure of the alternative set as well as on that of the committee (its Nakamura-number). Thus, if A is strictly convex, compact and has smooth boundary, then no committee can have a strong representation on A. On the other hand, if A has non-smooth boundary, representations may exist depending on the Nakamura-number (if it is at least 7).},
url = {/files/dp246.pdf},
author = {Keiding, Hans and Peleg, Bezalel}
}
@booklet {winter-smfnf2001,
title = {Subscription Mechanisms for Network Formation},
journal = {Discussion Papers},
number = {264},
year = {2001},
month = {8},
publisher = {Journal of Economic Theory 106 (2002), 242-264},
abstract = {We analyze a model of network formation where the costs of link formation are publicly known but individual benefits are not known to the social planner. The objective is to design a simple mechanism ensuring efficiency, budget balance and equity. We propose two mechanisms towards this end; the first ensures efficiency and budget balance but not equity. The second mechanism corrects the asymmetry in payoffs through a two-stage variant of the first mechanism. We also discuss an extension of the basic model to cover the case of directed graphs and give conditions under which the proposed mechanisms are immune to coalitional deviations.},
url = {/files/Eyal264.pdf},
author = {Mutuswami, Suresh and Winter, Eyal}
}
@booklet {segal-smatee2001,
title = {Super Majoritarianism and the Endowment Effect},
journal = {Discussion Papers},
number = {277},
year = {2001},
month = {11},
publisher = {Theory and Decision 55 (2003), 181-207},
abstract = {The American and some other constitutions entrench property rights by requiring super majoritarian voting as a condition for amending or revoking their own provisions. Following Buchanan and Tullock [5], this paper analyzes individuals{\textquoteright} interests behind a veil of ignorance, and shows that under some standard assumptions, a (simple) majoritarian rule should be adopted. This result changes if one assumes that preferences are consistent with the behavioral phenomenon known as the "endowment effect." It then follows that (at least some) property rights are best defended by super majoritarian protection. The paper then shows that its theoretical results are consistent with a number of doctrines underlying American Constitutional Law.},
url = {/files/dp277.pdf},
author = {Procaccia, Uriel and Segal, Uzi}
}
@booklet {nirdagan-ttns2001,
title = {Time-Preference Nash Solution, The},
journal = {Discussion Papers},
number = {265},
year = {2001},
month = {8},
abstract = {The primitives of a bargaining problem consist of a set, S, of feasible utility pairs and a disagreement point in it. The idea is that the set S is induced by an underlying set of physical outcomes which, for the purposes of the analysis, can be abstracted away. In a very influential paper Nash (1950) gives an axiomatic characterization of what is now the widely known Nash bargaining solution. Rubinstein, Safra, and Thomson (1992) (RST in the sequel) recast the bargaining problem into the underlying set of physical alternatives and give an axiomatization of what is known as the ordinal Nash bargaining solution. This solution has a very natural interpretation and has the interesting property that when risk preferences satisfy the expected utility axioms, it induces the standard Nash bargaining solution of the induced bargaining problem. This property justifies the proper name in the solution{\textquoteright}s appellation. The purpose of this paper is to give an axiomatic characterization of the rule that assigns the time-preference Nash outcome to each bargaining problem.},
url = {/files/dp265.pdf},
author = {Dagan, Nir and Volij, Oscar and Winter, Eyal}
}
@booklet {ullmann-margalit-tdaib2001,
title = {Trust, Distrust, and in Between},
journal = {Discussion Papers},
number = {269},
year = {2001},
month = {9},
publisher = {In Russell Hardin (ed.), Distrust, New York: Russell Sage Publications, 2004, 60-82},
abstract = {The springboard for this paper is the nature of the negation relation between the notions of trust and distrust. In order to explore this relation, an analysis of full trust is offered. An investigation follows of the ways in which this "end-concept" of full trust can be negated. In particular, the sense in which distrust is the negation of trust is focused on. An asymmetry is pointed to, between {\textquoteright}not-to-trust{\textquoteright} and {\textquoteright}not-to-distrust{\textquoteright}. This asymmetry helps explain the existence of a gap between trust and distrust: the possibility of being suspended between the two. Since both trust and distrust require reasons, the question that relates to this gap is what if there are no reasons, or at any rate no sufficient reasons, either way. This kind of situation, of being suspended between two poles without a sufficient reason to opt for any one of them, paradigmatically calls for a presumption. In the case in hand this means a call for either a rebuttable presumption in favor of trust or a rebuttable presumption in favor of distrust. In some of the literature on trust it seems to be taken almost for granted that generalized distrust is justifiable in a way that generalized trust is not. This would seem to suggest a straightforward recommendation for the presumption of distrust over the presumption of trust. Doubts are raised whether indeed it is justified to adopt this as a default presumption. The notion of soft distrust, which is introduced at this point as contrasted with hard distrust, contributes in a significant way to these doubts. The analysis offered throughout the paper is of individual and personal trust and distrust. As it stands, it would seem not to be directly applicable to the case of trusting or distrusting institutions (like the court or the police). 
The question is therefore raised, in the final section, whether and how the analysis of individual trust and distrust can be extended to institutional trust and distrust. A case is made that there is asymmetry here too: while it is a misnomer to talk of trusting institutions, talk of distrusting institutions is not.},
url = {/files/dp269.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {haimanko-udwpi2001,
title = {Unilateral Deviations with Perfect Information},
journal = {Discussion Papers},
number = {249},
year = {2001},
month = {6},
abstract = {For extensive form games with perfect information, consider a learning process in which, at any iteration, each player unilaterally deviates to a best response to his current conjectures of others{\textquoteright} strategies; and then updates his conjectures in accordance with the induced play of the game. We show that, for generic payoffs, the outcome of the game becomes stationary in finite time, and is consistent with Nash equilibrium. In general, if payoffs have ties or if players observe more of each others{\textquoteright} strategies than is revealed by plays of the game, the same result holds provided a rationality constraints is imposed on unilateral deviations: no player changes his moves in subgames that he deems unreachable, unless he stands to improve his payoff there. Moreover, with this constraint, the sequence of strategies and conjectures also becomes stationary and yields a self-confirming equilibrium.},
url = {/files/dp249.pdf},
author = {Dubey, Pradeep and Haimanko, Ori}
}
@booklet {simon-otueasoks2001,
title = {On the Unique Extensibility and Surjectivity of Knowledge Structures},
journal = {Discussion Papers},
number = {274},
year = {2001},
month = {9},
abstract = {With the S5 multi-agent epistemic logic we consider the canonical maps from Kripke structures to knowledge structures. We define a cell to be a minimal subset of knowledge structures known in common semantically by the agents. A cell has finite fanout if at every point every agent considers only a finite number of other points to be possible. We define a cell to be surjective if every Kripke structure that maps to it does so surjectively. All cells with finite fanout are surjective, but the converse does not hold. To construct a counter-example we need topological insights concerning the relationship between the logic and its semantic models. The difference between syntactic and semantic common knowledge is central to this construction.},
url = {/files/dp274.pdf},
author = {Robert Samuel Simon}
}
@booklet {hon-snir-ueia2001,
title = {Utility Equivalence in Auctions},
journal = {Discussion Papers},
number = {257},
year = {2001},
month = {7},
abstract = {Auctions are considered with a (non-symmetric) independent-private-value model of valuations. It shall be demonstrated that a utility equivalence principle holds for an agent if and only if such agent has a constant absolute risk-attitude.},
url = {/files/dp257.pdf},
author = {Shlomit Hon-Snir}
}
@booklet {neyman-avo2001,
title = {A Value on {\textquoteright}AN},
journal = {Discussion Papers},
number = {276},
year = {2001},
month = {11},
publisher = {International Journal of Game Theory 32 (2003), 109-120},
abstract = {We prove here the existence of a value (of norm 1) on the spaces {\textquoteright}NA and even {\textquoteright}AN, the closure in the variation distance of the linear space spanned by all games f o mu, where mu is a non-atomic, non-negative finitely additive measure of mass 1 and f a real-valued function on [0, 1] which satisfies a much weakened continuity at zero and one.},
url = {/files/dp276.pdf},
author = {Mertens, Jean-Francois and Neyman, Abraham}
}
@booklet {neyman-vogwimp2001,
title = {Values of Games with Infinitely Many Players},
journal = {Discussion Papers},
number = {247},
year = {2001},
month = {6},
publisher = {Handbook of Game Theory, with Economic Applications, Vol. III, R. J. Aumann and S. Hart (eds.), Elsevier North-Holland (2002), 2121-2167.},
abstract = {The Shapley value is one of the basic solution concepts of cooperative game theory. It can be viewed as a sort of average or expected outcome, or as an a priori evaluation of the players{\textquoteright} expected payoffs. The value has a very wide range of applications, particularly in economics and political science (see chapters 32, 33 and 34 in this Handbook). In many of these applications it is necessary to consider games that involve a large number of players. Often most of the players are individually insignificant, and are effective in the game only via coalitions. At the same time there may exist big players who retain the power to wield single-handed influence. A typical example is provided by voting among stockholders of a corporation, with a few major stockholders and an "ocean" of minor stockholders. In economics, one considers an oligopolistic sector of firms embedded in a large population of "perfectly competitive" consumers. In all of these cases, it is fruitful to model the game as one with a continuum of players. In general, the continuum consists of a non-atomic part (the "ocean"), along with (at most countably many) atoms. The continuum provides a convenient framework for mathematical analysis, and approximates the results for large finite games well. Also, it enables a unified view of games with finite, countable or oceanic player-sets, or indeed any mixture of these.},
url = {/files/dp247.pdf},
author = {Abraham Neyman}
}
@booklet {hart-vopce2001,
title = {Values of Perfectly Competitive Economies},
journal = {Discussion Papers},
number = {234},
year = {2001},
month = {1},
publisher = {In R. J. Aumann \& S. Hart (eds.) Handbook of Game Theory, with Economic Applications. (2002) Vol. III, Ch. 57, Elsevier/North-Holland},
abstract = {This chapter is devoted to the study of economic models with many agents, each of whom is relatively insignificant. These are referred to as perfectly competitive models. The basic economic concept for such models is the competitive (or Walrasian) equilibrium, which prescribes prices that make the total demand equal to the total supply, i.e., under which the "markets clear." The fact that each agent is negligible implies that he cannot singly affect the prices, and so he takes them as given when finding his optimal consumption - "demand." The chapter is organized as follows: Section 2 presents the basic model of an exchange economy with a continuum of agents, together with the definitions of the appropriate concepts. The Value Principle results are stated in Section 3. An informal (and hopefully instructive) proof of the Value Equivalence Theorem is provided in Section 4. Section 5 is devoted to additional material, generalizations, extensions and alternative approaches.},
url = {/files/val-hgt.html},
author = {Sergiu Hart}
}
@booklet {wu-wlcimpi2001,
title = {When Less Competition Induces More Product Innovation},
journal = {Discussion Papers},
number = {255},
year = {2001},
month = {6},
publisher = {Economics Letters 74 (2002), 309-312.},
abstract = {Consider firms which engage in Cournot competition over a common product, but can undertake innovation to improve the quality of their product. In this scenario it can often happen that innovation is discouraged by too much or too little competition, and occurs only when the industry is of intermediate size.},
url = {/files/dp255.pdf},
author = {Dubey, Pradeep and Wu, Chien-Wei}
}
@booklet {sorin-ztrgwpudp2001,
title = {Zero-Sum Two-Person Repeated Games with Public Uncertain Duration Process},
journal = {Discussion Papers},
number = {259},
year = {2001},
month = {7},
abstract = {We consider repeated two-person zero-sum games where the number of repetitions theta is unknown. The information about the uncertain duration is identical to both players and can change during the play of the game. This is described by an uncertain duration process Theta. To each repeated game Gamma and uncertain duration process Theta is associated the Theta repeated game Gamma_Theta with value V_Theta. We establish a recursive formula for the value V_Theta. We study asymptotic properties of the value v_Theta=V_Theta/E(theta) as the expected duration E(theta) goes to infinity. We extend and unify several asymptotic results on the existence of lim v_n and lim v_lambda and their equality to lim v_Theta. This analysis applies in particular to stochastic games and repeated games of incomplete information.},
url = {/files/dp259.pdf},
author = {Neyman, Abraham and Sorin, Sylvain}
}
@booklet {schul-aaepicnatroisq2000,
title = {Acceptance and Elimination Procedures in Choice: Non-Complementarity and the Role of Implied Status Quo},
journal = {Discussion Papers},
number = {211},
year = {2000},
month = {2},
publisher = {Organizational Behavior and Human Decision Processes 82 (2000), 293-313},
abstract = {The present research contrasts two seemingly complementary decision strategies: acceptance and elimination. In acceptance, a choice set is created by including suitable alternatives from an initial set of alternatives, whereas in elimination it is created by removing inappropriate alternatives from that same initial set. The research used realistic career decision-making scenarios and presented to respondents sets of alternatives that varied in their pre-experimental strength values. Whereas complementarity of acceptance and elimination is implied by three standard (normative) assumptions of decision theory, we find a systematic discrepancy between the outcomes of these procedures: choice sets were larger in elimination than in acceptance. This acceptance/elimination discrepancy is directly tied to sub-complementarity. The central tenet of the theoretical framework developed here is that acceptance and elimination procedures imply different types of status quo for the alternatives, thereby invoking a different selection criterion for each procedure. A central prediction of the dual-criterion framework is the "middling" alternatives should be most susceptible to the type of procedure used. The present studies focus on this prediction which is substantiated by the results showing that "middling" alternatives yield the greatest discrepancy between acceptance and elimination. The implications of this model and findings for various research domains are discussed.},
url = {/files/dp211.pdf},
author = {Yaniv, Ilan and Schul, Yaacov}
}
@booklet {kleinberger-atidmedarf2000,
title = {Advice Taking in Decision Making: Egocentric Discounting and Reputation Formation},
journal = {Discussion Papers},
number = {212},
year = {2000},
month = {2},
publisher = {Organizational Behavior and Human Decision Processes 83 (2000), 260-281},
abstract = {Our framework for understanding advice-taking in decision making rests on two theoretical concepts that motivate the studies and serve to explain the findings. The first is egocentric discounting of others{\textquoteright} opinion and the second is reputation formation for advisors. We review the evidence for these concepts, trace their theoretical origins, and point out some of their implications. In three studies we measured decision makers{\textquoteright} "weighting policy" for the advice, and in a fourth study, their "willingness to pay" for it. Briefly, we found that advice is discounted relative to own opinion, and reputation for advisors is rapidly formed and asymmetrically revised. The asymmetry implies that it may be easier for advisors to lose a good reputation than to gain it. The cognitive and social origins of these phenomena are considered.},
url = {/files/dp212.pdf},
author = {Yaniv, Ilan and Kleinberger, Eli}
}
@booklet {gorfine-adoipdg2000,
title = {Analysing Data of Intergroup Prisoner{\textquoteright}s Dilemma Game},
journal = {Discussion Papers},
number = {215},
year = {2000},
month = {3},
abstract = {The Intergroup Prisoner{\textquoteright}s Dilemma (IPD) game was suggested by Bornstein (1992) for modeling intergroup conflicts over continuous public goods. We analyze data of an experiment in which the IPD game was played for 150 rounds, under three matching conditions. The objective is to study differences in the investment patterns of players in the different groups. A repeated measures analysis (Goren \& Bornstein, 1999) involved data aggregation and strong distributional assumptions. Here we introduce a non-parametric approach based on permutation tests, applied to the raw data. Two new measures, the cumulative investment and the normalized cumulative investment, provide additional insight into the differences between groups. The proposed tests, based on the area under the investment curves, identify overall and pairwise differences between groups. A simultaneous confidence band for the mean difference curve is used to detect games which account for pairwise differences.},
url = {/files/dp215.ps},
author = {Nirel, Ronit and Gorfine, Malka}
}
@booklet {shapira-alartbgbt2000,
title = {Aspiration Levels and Risk Taking by Government Bond Traders},
journal = {Discussion Papers},
number = {227},
year = {2000},
month = {11},
abstract = {The management of risk is important in financial institutions. In particular, investment houses dealing with volatile financial markets such as foreign exchange or government bonds may find it difficult to maintain "proper" levels of risk taking. On one hand, firms encourage traders to take risks in trading government bonds, but on the other, they promote risk aversion since they value reputation as careful and solid investors rather than having a reputation of risk takers. Government bond traders work in a very volatile and fast moving market. They are compensated by a base salary plus a bonus which relates to the profit and loss (P\&L) they create for the firm on the securities they trade. Recent models of risk taking (Kahneman and Tversky, 1979; March and Shapira, 1992; Shapira, 1995) suggest that risk taking is affected by the targets or reference points that people use to evaluate risky prospects. Such targets can be set by "objective" grounds, that is, based on some rational economic considerations of profitability. However, often the targets are set in a "comparative" sense, that is, by comparison to the performance of other similar firms. The above models suggest some alternative ways in which targets may affect risk taking. These predictions are tested using data on actual purchase and sell decisions made by government bond traders. Implications for risk management are discussed.},
url = {/files/zur227.pdf},
author = {Zur Shapira}
}
@booklet {simon-tcpaibsae2000,
title = {Common Prior Assumption in Belief Spaces: An Example, The},
journal = {Discussion Papers},
number = {228},
year = {2000},
month = {12},
abstract = {With four persons there is an example of a probability space where 1) the space is generated by hierarchies of knowledge concerning a single proposition, 2) the subjective beliefs of the four persons are continuous regular conditional probability distributions of a common prior probability distribution (continuous with respect to the weak topology), and 3) for every subset that the four persons know in common there is no common prior probability distribution. Furthermore, for every measurable set, every person, and at every point in the space, the subjective belief in this measurable set is one of the quantities 0, 1/2 or 1. This example presents problems for understanding games of incomplete information through common priors.},
url = {/files/dp228.PDF},
author = {Robert Samuel Simon}
}
@booklet {simon-einsgwfmsaepudf2000,
title = {Epsilon-Equilibria in Non-Zero-Sum Stochastic Games with Finitely Many States, An Existence Proof Using Discount Factors},
journal = {Discussion Papers},
number = {225},
year = {2000},
month = {8},
abstract = {This paper proves the existence of epsilon-equilibria in non-zero-sum positive recursive stochastic games with finitely many states, using a kind of discount factor.},
url = {{\textquoteright}},
author = {Robert S. Simon}
}
@booklet {assafben-shoham-teoe2000,
title = {Evolution of Exchange, The},
journal = {Discussion Papers},
number = {219},
year = {2000},
month = {5},
abstract = {Stochastic stability is applied to the problem of exchange. We analyze the stochastic stability of two dynamic trading processes in a simple housing market. In both models traders meet in pairs at random and exchange their houses when trade is mutually beneficial, but occasionally they make mistakes. The models differ in the probability of mistakes. When all mistakes are equally likely, the set of stochastically stable allocations contains the set of efficient allocations. When more serious mistakes are less likely, the stochastically stable states are those allocations, always efficient, with the lowest envy-level.},
url = {/files/dp219.pdf},
author = {Ben-Shoham, Assaf and Serrano, Roberto and Volij, Oscar}
}
@booklet {tamarkeasar-fbitbsleapdr2000,
title = {Foraging Bees in Two-Armed Bandit Situations: Laboratory Experiments and Possible Decision Rules},
journal = {Discussion Papers},
number = {226},
year = {2000},
month = {10},
publisher = {Behavioral Ecology 13 (2002), 757-765},
abstract = {In multi-armed bandit situations, gamblers must choose repeatedly between options that differ in reward probability, without prior information on the options{\textquoteright} relative profitability. Foraging bumblebees encounter similar situations when choosing repeatedly among flower species that differ in food rewards. Unlike proficient gamblers, bumblebees do not choose the highest-rewarding option exclusively. We simulated two-armed bandit situations in laboratory experiments to characterize this choice behavior.},
url = {{\textquoteright}},
author = {Keasar, Tamar and Rashkovich, Ella and Cohen, Dan and Shmida, Avi}
}
@booklet {cahn-gpltce2000,
title = {General Procedures Leading to Correlated Equilibria},
journal = {Discussion Papers},
number = {216},
year = {2000},
month = {5},
publisher = {International Journal of Game Theory 33 (2004), 21-40},
abstract = {Hart and Mas-Colell (2000) show that if all players play "regret matching" strategies, i.e. they play with probabilities proportional to the regrets, then the empirical distributions of play converge to the set of correlated equilibria, and the regrets of each player converge to zero. Here we show that if only one player, say player i , plays according to these probabilities, while the other players are "not too sophisticated", then the result that player i{\textquoteright}s regrets converge to zero continues to hold. The condition of "not too sophisticated" essentially says that the effect of one change of action of player i on the future actions of the other players decreases to zero as the horizon goes to infinity. Furthermore, we generalize all these results to a whole class of "regret based" strategies. In particular, these include the "smooth fictitious play" of Fudenberg and Levine (1998).},
url = {/files/dp216.pdf},
author = {Amotz Cahn}
}
@booklet {volij-idod2000,
title = {In Defense of DEFECT},
journal = {Discussion Papers},
number = {220},
year = {2000},
month = {5},
publisher = {Games and Economic Behavior 39 (2000), 309-321.},
abstract = {The one-state machine that always defects is the only evolutionarily stable strategy in the machine game that is derived from the prisoner{\textquoteright}s dilemma, when preferences are lexicographic in complexity. This machine is the only stochastically stable strategy of the machine game when players are restricted to choosing machines with a uniformly bounded complexity.},
url = {/files/dp220.pdf},
author = {Volij, Oscar}
}
@booklet {murielney-nifle-laclibiatce2000,
title = {Location and Color Learning in Bumblebees in a Two-Phase Conditioning Experiment},
journal = {Discussion Papers},
number = {213},
year = {2000},
month = {2},
publisher = {Journal of Insect Behavior 14 (2001), 697-711.},
abstract = {Bees learn the location, odor, color and shape of flowers, and use these cues hierarchically to make dietary choices. If two such cues always appear together, they provide the bees with identical information about their food source. In such a situation, bees may base dietary choices on one cue and ignore the other, or they may continue to consider both cues. We studied this question by allowing bumblebees to forage on two patches of artificial flowers that differed in location, color and presence of reward in a two-phase laboratory experiment. We switched either the display color, the location, or both color and location associated with the rewarding patch between experimental phases. We tested for the effects of switches by comparing the bees{\textquoteright} choices across treatments, and by evaluating each bee{\textquoteright}s performance before and after the change. In our analysis we characterized the different patterns of visits to empty flowers by a plot of the cumulative frequency of such visits over time. This plot enabled us to identify two regimes: ( I ) a learning regime, when new associations between reward and display cues are formed, followed by (2) a steady-state where bees make periodic visits to the empty patch. We used likelihood analysis to estimate the length of short-term memory that can account for the bees{\textquoteright} steady-state foraging choices. The bees{\textquoteright} performance decreased immediately following a switch in location of the rewarding patch. Switches in both reward color and location elicited a similar decrease to switches in location only. No temporary decrease in foraging performance occurred when only color of the rewarding patch was changed, and in no-change controls. The bees{\textquoteright} flower choices at steady-state were most likely generated by a short-term memory of the last 4-6 flower visits.},
url = {/files/db213.pdf},
author = {Ney-Nifle, Muriel and Keasar, Tamar and Shmida, Avi}
}
@booklet {shapira-maotaeteoi2000,
title = {Managerial Allocation of Time and Effort: The Effects of Interruptions},
journal = {Discussion Papers},
number = {230},
year = {2000},
month = {12},
publisher = {Management Science 47 (2001), 647-662},
abstract = {Time is one of the more salient constraints on managerial behavior. This constraint may be very taxing in high velocity environments where managers have to attend to many tasks simultaneously. Earlier work by Radner [1976] proposed models based on notions of the thermostat or putting out fires to guide managerial time and effort allocation among tasks. We link these ideas to the issue of the level of complexity of the tasks to be attended to while alluding to the sequential versus parallel modes of processing. We develop a stochastic model to analyze the behavior of a manager who has to attend to a few short term processes while attempting to devote as much time as possible to pursue a long term project. A major aspect of this problem is how does the manager deal with interruptions. Different rules of attention allocation are proposed and their implications to managerial behavior are discussed.},
url = {/files/db230.pdf},
author = {Seshadri, Sridhar and Shapira, Zur}
}
@booklet {peleg-tppoacg2000,
title = {Positive Prekernel of a Cooperative Game, The},
journal = {Discussion Papers},
number = {231},
year = {2000},
month = {12},
publisher = {International Game Theory Review 2 (2000), 287-305},
abstract = {The positive prekernel, a solution of cooperative transferable utility games, is introduced. We show that this solution inherits many properties of the prekernel and of the core, which both are subsolutions. It coincides with its individually rational variant, the positive kernel, when applied to any zero-monotonic game. The positive (pre)kernel is a subsolution of the reactive (pre) bargaining set. We prove that the positive prekernel on the set of games with players belonging to a universe of at least three possible members can be axiomatized by nonemptiness, anonymity, reasonableness, the weak reduced game property, the converse reduced game property, and a weak version of unanimity for two-person games.},
url = {/files/dp231.pdf},
author = {Sudholter, Peter and Peleg, Bezalel}
}
@booklet {samuel-cahn-pifosrwpr2000,
title = {Prophet Inequalities for Optimal Stopping Rules with Probabilistic Recall},
journal = {Discussion Papers},
number = {210},
year = {2000},
month = {2},
publisher = {Bernoulli 8 (2002), 39-52},
abstract = {Let X_i},
url = {{\textquoteright}},
author = {Assaf, David and Samuel-Cahn, Ester}
}
@booklet {cohen-arbfibab2000,
title = {A Rational Basis for Irrational Beliefs and Behaviors},
journal = {Discussion Papers},
number = {209},
year = {2000},
month = {1},
abstract = {No Abstract},
url = {{\textquoteright}},
author = {Cohen, Dan}
}
@booklet {zamir-raeiub2000,
title = {Rationality and Emotions in Ultimatum Bargaining},
journal = {Discussion Papers},
number = {222},
year = {2000},
month = {7},
publisher = {Annals D{\textquoteright}Economie Et de Statistique 61 (2001) 1-31},
abstract = {The Ultimatum Bargaining paradigm is often thought of as a demonstration of extreme disagreement between experimental evidence and game theoretical predictions and the basic assumption of rationality from which they are derived. Using the data of four experiments on Ultimatum Bargaining which I am involved in, I argue that, quite differently from this general impression, rationality in the sense of self-interested motives, is very much present in the observed behavior of both proposers and responders in the Ultimatum Bargaining game. Part of the argument calls for a broader interpretation of the notion of rationality than just immediate money maximization and the backward induction argument.},
url = {/files/dp222.pdf},
author = {Shmuel Zamir}
}
@booklet {mas-colell-arpltce2000,
title = {A Reinforcement Procedure Leading to Correlated Equilibrium},
journal = {Discussion Papers},
number = {224},
year = {2000},
month = {8},
publisher = {G. Debreu, W. Neuefeind \& W. Trockel (eds.), Economic Essays: A Festschrift for Werner Hildenbrand, Springer (2001), 181-200},
abstract = {We consider repeated games where at any period each player knows only his set of actions and the stream of payoffs that he has received in the past. He knows neither his own payoff function, nor the characteristics of the other players (how many there are, their strategies and payoffs). In this context, we present an adaptive procedure for play - called "modified-regret- matching" - which is interpretable as a stimulus-response or reinforcement procedure, and which has the property that any limit point of the empirical distribution of play is a correlated equilibrium of the stage game.},
url = {http://reinfr.html},
author = {Hart, Sergiu and Mas-Colell, Andreu}
}
@booklet {peleg-roeficpneacc2000,
title = {Representation of Effectivity Functions in Coalition Proof Nash Equilibrium: A Complete Characterization},
journal = {Discussion Papers},
number = {223},
year = {2000},
month = {8},
publisher = {Social Choice and Welfare 19 (2002), 241-263},
abstract = {The concept of coalition proof Nash equilibrium was introduced by Bernheim, Peleg and Whinston. In the present paper, we consider the representation problem for coalition proof Nash equilibrium: For a given effectivity function, describing the power structure or the system of rights of coalitions in society, it is investigated whether there is a game form which gives rise to this effectivity function and which is such that for any preference assignment, there is a coalition proof Nash equilibrium. It is shown that the effectivity functions which can be represented in coalition proof Nash equilibrium are exactly those which satisfy the well-known properties of maximality and superadditivity. As a corollary of the result, we obtain necessary conditions for implementation of a social choice correspondence in coalition proof Nash equilibrium which can be formulated in terms of the associated effectivity function.},
url = {/files/dp223.pdf},
author = {Keiding, Hans and Peleg, Bezalel}
}
@booklet {winter-oraabo2000,
title = {On Risk Aversion and Bargaining Outcomes},
journal = {Discussion Papers},
number = {214},
year = {2000},
month = {3},
publisher = {Games and Economic Behavior 41 (2002), 120-140.},
abstract = {We revisit the well-known result that asserts that an increase in the degree of one{\textquoteright}s risk aversion improves the position of one{\textquoteright}s opponents. To this end, we apply Yaari{\textquoteright}s dual theory of choice under risk both to Nash{\textquoteright}s bargaining problem and to Rubinstein{\textquoteright}s game of alternating offers. Under this theory, unlike under expected utility, risk aversion influences the bargaining outcome only when this outcome is random, namely, when the players are risk-lovers. In this case, an increase in one{\textquoteright}s degree of risk aversion increases one{\textquoteright}s share of the pie.},
url = {/files/dp214.pdf},
author = {Volij, Oscar and Winter, Eyal}
}
@booklet {ullmann-margalit-sg2000,
title = {Solidarity Goods},
journal = {Discussion Papers},
number = {217},
year = {2000},
month = {5},
publisher = {The Journal of Political Philosophy 9 (2001), 129-149.},
abstract = {Contrary to a common picture of relationships in a market economy, people often express communal and membership-seeking impulses via consumption choices, purchasing goods and services because other people are doing so as well. Shared identities are maintained and created in this way. Solidarity goods are goods whose value increases as the number of people enjoying them increases. Exclusivity goods are goods whose value decreases as the number of people enjoying them increases. Distinctions can be drawn among diverse value functions, capturing diverse relationships between the value of goods and the value of shared or unshared consumption. Though markets spontaneously produce solidarity goods, individuals sometimes have difficulty in producing such goods on their own, or in coordinating on choosing them. Here law has a potential role. There are implications for trend setting, clubs, partnerships, national events, social cascades and compliance without enforcement.},
url = {/files/dp217.pdf},
author = {Sunstein, Cass R. and Ullmann-Margalit, Edna}
}
@booklet {zamir-tsuosiipa2000,
title = {Strategic Use of Seller Information in Private-Value Auctions, The},
journal = {Discussion Papers},
number = {221},
year = {2000},
month = {7},
abstract = {In the framework of a first-price private-value auction, we study the seller as a player in a game with the buyers in which he has private information about their realized valuations. We find that depending upon his information, set of signals, and commitment power, he may strategically transmit messages to buyers in order to increase his revenue. In an environment where the seller knows the rankings and lacks any commitment power, we find that the seller is unable to exploit his information. However, in an environment where the seller knows the realized valuations and can credibly announce either the true rankings or the true values (or announce nothing at all) but cannot commit as to which of these truthful messages to announce, then it is indeed possible to increase his revenue. If the seller, in addition, can commit to the full signaling strategy, then his expected revenue will be even higher. We believe that this line of research is fruitful for both better understanding behavior in auctions and finding paths to higher seller revenue.},
url = {/files/dp221.pdf},
author = {Kaplan, Todd R. and Zamir, Shmuel}
}
@booklet {bar-hillel-spj2000,
title = {Subjective Probability Judgments},
journal = {Discussion Papers},
number = {229},
year = {2000},
month = {12},
publisher = {In N. J. Smelser and P. B. Baltes (eds.) The International Encyclopedia of the Social and Behavioral Sciences 22 (2002) 15247-15251},
abstract = {Subjective probabilities are probabilities people express for uncertain events or outcomes. They are generated, or judged, by two major heuristics: 1. When outcomes are unique (e.g., the guilt of some defendant) or set in the future (e.g., the winner of the next election), the approach is "theoretical". People pull together whatever they know, or believe, to be relevant, and judge the probabilities of the possible outcomes by the closeness of the match between them and whatever "prediction model" they have built in their heads. This heuristic is called representativeness. 2. When outcomes are grouped in categories or by features (e.g., the percent of convictions for a given charge, or the percent of elections won by incumbents), the approach is "empirical": Let{\textquoteright}s sample what{\textquoteright}s out there and count. If the sampling is done in one{\textquoteright}s head, and the probabilities judged by the number of examples that come to mind, or by the ease {\textendash} real or anticipated {\textendash} with which they come to mind, the heuristic is that of availability. These heuristics have distinct signatures. They lead to predictable and systematic biases, among them: the extension fallacy, the base-rate fallacy, sample size neglect, regression neglect, the unpacking effect, overconfidence, hindsight bias and more.},
url = {/files/dp229.pdf},
author = {Bar-Hillel, Maya}
}
@booklet {danielgranot-vfvtuc2000,
title = {Voting for Voters: The Unanimity Case},
journal = {Discussion Papers},
number = {218},
year = {2000},
month = {5},
publisher = {International Journal of Game Theory 31 (2003), 155-202.},
abstract = {We present a simplified model of the evolution of a society which is regulated by a formal unanimity voting procedure. We examine several protocols, which depend on whether admission or expulsion are permissible, and on the order with which they are implemented. Conditions which ensure the existence of pure-strategy perfect equilibrium profiles for some voting protocols, and counter examples for the existence of such profiles in other protocols are presented. Finally, we prove that the original founders would prefer a protocol in which expulsion precedes admission to protocols in which admission precedes expulsion, or the two are treated simultaneously.},
url = {/files/dp218.pdf},
author = {Granot, Daniel and Maschler, Michael and Shalev, Jonathan}
}
@booklet {petersudholter-tcefoagfpi-r1999,
title = {Canonical Extensive Form of a Game Form: Part II - Representation, The},
journal = {Discussion Papers},
number = {202},
year = {1999},
month = {8},
publisher = {Journal of Mathematical Economics 33(2000),299-338},
abstract = {This paper exhibits to any noncooperative game in strategic or normal form a {\textquoteright}canonical{\textquoteright} game in extensive form that preserves all symmetries of the former one. The operation defined this way respects the restriction of games to subgames and yields a minimal total rank of the tree involved. Moreover, by the above requirements the {\textquoteright}canonical extensive game form{\textquoteright} is uniquely defined. Key words: Games, Extensive Form, Normal Form, Strategic Form. AMS(MOS) Subject Classification: 90D10, 90D35, 05C05},
url = {/files/dp202.pdf},
author = {Sudholter, Peter and Rosenmuller, Joachim and Peleg, Bezalel}
}
@booklet {wernerguth-copaiisgot1999,
title = {Co-Evolution of Preferences and Information in Simple Games of Trust},
journal = {Discussion Papers},
number = {190},
year = {1999},
month = {2},
publisher = {German Economic Review 1 (2000), 83-110. Also in Khalil, E., L. (2003). Trust. Elgar Reference Collection. Critical Studies in Economic Institutions Vol 3, 631-58},
abstract = {In standard rational choice modelling decisions are made according to given information and preferences. In the model presented here the {\textquoteright}information technology{\textquoteright} of individual decision makers as well as their preferences evolve in a dynamic process. In this process decisions are made rationally by players who differ in their informational as well as in their preference type. Relative success of alternative decisions feeds back on the type composition of the population which in turn influences rational decision making. An indirect evolutionary analysis of an elementary yet important basic game of trust shows that under certain parameter constellations the population dynamics of the evolutionary process specify a unique completely mixed rest point. However, as opposed to previous studies of preference formation in the game of trust there is no convergence to but only cycling around the rest point if the informational status of individuals evolves rather than being chosen strategically.},
url = {/files/dp190.pdf},
author = {Guth, Werner and Kliemt, Hartmut and Peleg, Bezalel}
}
@booklet{haimanko-cstnc1999,
  author    = {Ori Haimanko},
  title     = {Cost Sharing: The Nondifferentiable Case},
  journal   = {Discussion Papers},
  number    = {205},
  year      = {1999},
  month     = {9},
  publisher = {Journal of Mathematical Economics 35 (2001), 445-462.},
  abstract  = {We show existence and uniqueness of cost allocating mechanisms, satisfying standard axioms, on three classes of cost functions with major nondifferentiabilities. Two of the classes consist of nondecreasing convex functions, which exhibit either increasing or constant costs to scale. The third is the space of piecewise linear cost functions.},
  url       = {/files/dp205.pdf}
}
@booklet {klausabbink-tcrug1999,
title = {Covered Response Ultimatum Game, The},
journal = {Discussion Papers},
number = {191},
year = {1999},
month = {2},
abstract = {We report an experiment on the covered response ultimatum game, in which the proposer is not informed about the responder{\textquoteright}s reaction to an unequal offer. In this game, no education of proposers is possible. A control experiment with informed proposers was also conducted. We observe high rejection rates with covered response. These are explained by responders{\textquoteright} resistance to unfairness. But the rejection rates are lower than in the control group, due to the lacking possibility of educative punishment. Proposers in the open response treatment test responders{\textquoteright} propensity to reject by making more unequal offers. We conclude that both resistance to unfairness and educative punishment are determinants of behaviour, but neither is sufficient on its own. Keywords Ultimatum bargaining, fairness, punishment, experimental economics.},
url = {/files/dp191.pdf},
author = {Klaus Abbink and Abdolkarim Sadrieh and Shmuel Zamir}
}
@booklet {bornstein-teoicoicitripdg1999,
title = {Effects of Intra-Group Communication on Intergroup Cooperation in the Repeated Intergroup Prisoner{\textquoteright}s Dilemma (IPD) Game, The},
journal = {Discussion Papers},
number = {204},
year = {1999},
month = {9},
publisher = {Journal of Conflict Resolution 44, 700-719},
abstract = {We report an experiment on individual and group behavior in intergroup conflict as modeled by the Intergroup Prisoner{\textquoteright}s Dilemma (IPD) game (Bornstein, 1992). The game was played repeatedly either with or without intra-group communication in an attempt to distinguish the dynamic process associated with reciprocation at the intergroup level from that resulting from adaptation at the individual level. We found that without communication, individuals gradually learned that it does not pay to contribute. The overall effect of within-group communication was to increase individual contribution. However, this effect varied greatly in later stages of the game. In some cases intragroup communication eliminated individual contribution, rewarding the members of both teams with the mutually cooperative outcome, while in other cases it intensified the intergroup conflict to its maximal level of full contribution. The implications for these findings to conflict resolution are discussed.},
url = {/files/dp204.pdf},
author = {Harel Goren and Gary Bornstein}
}
@booklet {hart-edabi1999,
title = {Evolutionary Dynamics and Backward Induction [Revised]},
journal = {Discussion Papers},
number = {195},
year = {1999},
month = {9},
publisher = {Games and Economic Behavior 41 (2002) 227-264},
abstract = {The backward induction (or subgame-perfect) equilibrium of a perfect information game is shown to be the unique evolutionarily stable outcome for dynamic models consisting of selection and mutation, when the mutation rate is low and the populations are large. Keywords: games in extensive form, games of perfect information, backward induction equilibrium, subgame-perfect equilibrium, evolutionary dynamics, evolutionary stability, mutation, selection, population games. Journal of Economic Literature Classification: C7, D7, C6.},
url = {/files/evol-bi.html},
author = {Sergiu Hart}
}
@booklet {reny-aeea1999,
title = {An Ex-Post Efficient Auction},
journal = {Discussion Papers},
number = {200},
year = {1999},
month = {8},
abstract = {An analogue of Vickrey{\textquoteright}s (1961) multi-unit auction is provided when bidders have interdependent values. The analogue is strategically equivalent to a collection of two-bidder single-unit second-price auctions and it possesses an ex-post efficient equilibrium. As an application of this result, it is shown that the FCC auction possesses an efficient equilibrium in the case of homogeneous goods. Conditions are provided under which the new auction (and also the FCC auction) revenue-dominates all ex-post equilibria of ex-post efficient individually rational mechanisms.},
url = {/files/dp200.pdf},
author = {Motty Perry and Philip J. Reny}
}
@booklet {zamir-tgftsocirgoii1999,
title = {Game for the Speed of Convergence in Repeated Games of Incomplete Information, The},
journal = {Discussion Papers},
number = {187},
year = {1999},
month = {1},
publisher = {International Journal of Game Theory 31 (2002) 203-232},
abstract = {We consider an infinitely repeated zero-sum two-person game with incomplete information on one side, in which the maximizer is the (more) informed player. Such games have value Vx(p) for all 0},
url = {/files/dp187.pdf},
author = {Irit Nowik and Shmuel Zamir}
}
@booklet {mas-colell-agcoas1999,
title = {A General Class of Adaptive Strategies},
journal = {Discussion Papers},
number = {192},
year = {1999},
month = {3},
publisher = {Journal of Economic Theory 98(2001), 26-54},
abstract = {We exhibit and characterize an entire class of simple adaptive strategies,in the repeated play of a game, having the Hannan-consistency property: In the long-run, the player is guaranteed an average payoff as large as the best- reply payoff to the empirical distribution of play of the other players; i.e., there is no "regret." Smooth fictitious play (Fudenberg and Levine [19951) and regret-matching (Hart and Mas-Colell [1998]) are particular cases. The motivation and application of this work come from the study of procedures whose empirical distribution of play is, in the long-run, (almost) a correlated equilibrium. The basic tool for the analysis is a generalization of Blackwell{\textquoteright}s [1956a) approachability strategy for games with vector payoffs. Keywords: adaptive strategies, approachability, correlated equilibrium, fictitious play, regret. Journal of Economic Literature Classification: C7, D7, C6},
url = {/files/genadapt.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {haimanko-mcprfhcf1999,
title = {Marginal Cost Price Rule for Homogeneous Cost Functions},
journal = {Discussion Papers},
number = {206},
year = {1999},
month = {11},
publisher = {International Journal of Game Theory 31 (2002), 19-28.},
abstract = {We show that standard axioms determine uniquely the marginal cost pricing rule on homogeneous, convex and continuously differentiable cost functions.},
url = {/files/dp206.pdf},
author = {Ori Haimanko}
}
@booklet {khmelnitskaya-maevftg1999,
title = {Marginalist and Efficient Values for TU Games},
journal = {Discussion Papers},
number = {188},
year = {1999},
month = {1},
publisher = {Mathematical Social Sciences 38 (1999), 45-54.},
abstract = {We derive an explicit formula for a marginalist and efficient value for a TU game which possesses the -player property and is either continuous or monotonic. We show that every such a value has to be additive and covariant as well. It follows that the set of all marginalist, efficient, and monotonic values possessing the -player property coincides with the set of random-order values, and thereby the last statement provides an axiomatization without the linearity axiom for the latter which is similar to that of Young for the Shapley value. Another axiomatization without linearity for random-order values is provided by marginalism, efficiency, monotonicity, and covariance. Keywords: Transferable utility game; Value; Axiomatic characterization; Efficiency; Mar- ginalism},
url = {/files/dp188.pdf},
author = {Anna B. Khmelnitskaya}
}
@booklet{haimanko-pinpcte1999,
  author    = {Ori Haimanko},
  title     = {Payoffs in Non-Differentiable Perfectly Competitive TU Economies},
  journal   = {Discussion Papers},
  number    = {197},
  year      = {1999},
  month     = {6},
  publisher = {Journal of Economic Theory 106 (2002), 17-39.},
  abstract  = {We develop an axiomatization of a single-valued solution for finite-type perfectly competitive economies. The solution is a competitive payoff selection. Our axioms are similar to those of Dubey and Neyman for solutions of differentiable economies, and they give rise to the Mertens value.},
  url       = {/files/dp197.pdf}
}
@booklet {khmelnitskaya-piwtta1999,
title = {Power Indices Without the Transfer Axiom},
journal = {Discussion Papers},
number = {189},
year = {1999},
month = {1},
publisher = {In H. de Swart (ed.) Logic, Game Theory and Social Choice. Proceedings of the International Conference LGS (1999) Tilburg University Press: 208-213},
abstract = {We show that for voting systems containing at least three voters the set of all marginalist, efficient, and monotonic power indices possessing the -player property coincide with the set of random-order power indices, and thereby the last statement spreads to simple games the result of Khmelnitskaya concerning an axiomatization without the linearity assumption for random-order values for the entire class of TU games. We also give evidence that every marginalist, efficient, and symmetric power index is just the Shapley-Shubik power index what provides an axiomatization for the latter similar to that of Young for the Shapley value; in symmetric case there is no restriction for a number of players to be not less than three. Keywords: Simple game; Power index; Axiomatic characterization; Efficiency; Marginalism},
author = {Anna B. Khmelnitskaya}
}
@booklet {jenslethhougaard-otsoliitcoabg1999,
title = {On the Set of Lorenz-Maximal Imputations in the Core of a Balanced Game},
journal = {Discussion Papers},
number = {207},
year = {1999},
month = {11},
publisher = {International Journal of Game Theory 30(2001), 147-165},
abstract = {This paper considers the set of Lorenz-maximal imputations in the core of a balanced cooperative game as a solution concept. It is shown that the Lorenz-solution concept satisfies a number of suitable properties such as desirability, continuity and the reduced game property. Moreover, the paper considers alternative characterizations where it is shown that Lorenz-fairness is tantamount to the existence of an additive, strictly increasing and concave social welfare function. Finally the paper also provides axiomatic characterizations as well as two examples of application.},
url = {/files/Peleg207.pdf},
author = {Jens Leth Hougaard and Bezalel Peleg and Lars Thorlund-Petersen}
}
@booklet {samuel-cahn-srpifaswmc1999,
title = {Simple Ratio Prophet Inequalities for a Statistician with Multiple Choices},
journal = {Discussion Papers},
number = {203},
year = {1999},
month = {8},
publisher = {Journal of Applied Probability 37 (2000) 1084-1091},
abstract = {Let $X_i \geq 0$ be independent},
url = {/files/dp203.pdf},
author = {David Assaf and Ester Samuel-Cahn}
}
@booklet {winter-ssmfepg1999,
title = {Simple Subscription Mechanisms for Excludable Public Goods},
journal = {Discussion Papers},
number = {199},
year = {1999},
month = {6},
publisher = {Journal of Economic Theory 1 (1999), 72-94.},
abstract = {For excludable public goods, we propose simple mechanisms to uniquely implement a (core) stable and efficient production and cost-sharing outcome: consumers are asked to announce sequentially their minimal requested level of public good and a subscription towards its production. In one mechanism the subscriptions are order-independent and thus symmetric. The equilibrium outcomes induced by our mechanisms are immune to strategic deviations by coalitions. Keywords: Excludable public good, demand-subscription mechanism, implementation, stand alone core, coalition formation, strong equilibrium. Journal of Economic Literature Classification Numbers: H41, C72, D78.},
url = {/files/dp199.pdf},
author = {Parimal Kanti Bag and Eyal Winter}
}
@booklet {sudholter-sac1999,
title = {Single-Peakedness and Coalition-Proofness},
journal = {Discussion Papers},
number = {201},
year = {1999},
month = {8},
publisher = {Review of Economic Design 4 (1999), 381-387},
abstract = {We prove that multidimensional generalized median voter schemes are coalition-proof.},
url = {/files/dp201.pdf},
author = {Bezalel Peleg and Peter Sudholter}
}
@booklet {khmelnitskaya-swofdsus1999,
title = {Social Welfare Orderings for Different Subgroup Utility Scales},
journal = {Discussion Papers},
number = {198},
year = {1999},
month = {6},
publisher = {Mathematical Social Sciences},
abstract = {This paper characterizes social welfare orderings for different scales of individual utility measurement in distinct population subgroups. Different combinations of ordinal, interval, ratio, and translation scales are studied. We consider situations when utility comparisons among subgroups of individuals by unit and/or zeropoint can or cannot be made, that is when subgroup scales are dependent or independent. We show that for combinations of independent subgroup scales, every corresponding social ordering is fully determined by the opinions of only one subgroup of individuals and is in accordance with the measurement scales of its members{\textquoteright} utilities. We also investigate social orderings admissible given various combinations of arbitrary ratio scales that combine individual utilities from different subgroups.},
author = {Anna B. Khmelnitskaya}
}
@booklet {brendanmckay-stbcp1999,
title = {Solving The Bible Code Puzzle},
journal = {Discussion Papers},
number = {196},
year = {1999},
month = {6},
publisher = {Statistical Science 14(2)(1999), 150-173},
abstract = {A paper of Witztum, Rips and Rosenberg in this journal in 1994 made the extraordinary claim that the Hebrew text of the Book of Genesis encodes events which did not occur until millennia after the text was written. In reply, we argue that Witztum, Rips and Rosenberg{\textquoteright}s case is fatally defective, indeed that their result merely reflects on the choices made in designing their experiment and collecting the data for it. We present extensive evidence in support of that conclusion. We also report on many new experiments of our own, all of which failed to detect the alleged phenomenon.},
url = {/files/BCStatSci-196.pdf},
author = {Brendan McKay and Dror Bar-Natan and Maya Bar-Hillel and Gil Kalai}
}
@booklet {davidassaf-ascbbpaos1999,
title = {A Striking Connection Between Branching Processes and Optimal Stopping},
journal = {Discussion Papers},
number = {194},
year = {1999},
month = {5},
publisher = {Journal of Applied Probability 37 (2000) 613-626},
abstract = {A curious connection exists between the theory of optimal stopping for independent random variables, and branching processes. In particular, for the branching process Zn with offspring distribution Y, there exists a random variable X such that the probability P(Zn = 0) of extinction of the n-th generation in the branching process equals the value obtained by optimally stopping the sequence X-i_,...,Xn, where these variables are i.i.d distributed as X. Generalizations to the inhomogeneous and infinite horizon-cases are also considered. This correspondence furnishes a simple {\textquoteright}stopping rule{\textquoteright} method for computing various characteristics of branching processes, including rates of convergence of the n-th generation{\textquoteright}s extinction probability to the eventual extinction probability, for the supercritical, critical and subcritical Galton-Watson process. Examples, bounds, further generalizations and a connection to classical prophet inequalities are presented. Throughout, the aim is to show how this unexpected connection can be used to translate methods from one area of applied probability to another, rather than to provide the most general results.},
url = {/files/dp194.pdf},
author = {David Assaf and Larry Goldstein and Ester Samuel-Cahn}
}
@booklet{volij-ueisbaatdtocur1999,
  author   = {Volij, Oscar},
  title    = {Utility Equivalence in Sealed Bid Auctions and the Dual Theory of Choice Under Risk},
  journal  = {Discussion Papers},
  number   = {193},
  year     = {1999},
  month    = {3},
  abstract = {This paper analyzes symmetric, single item auctions in the private values framework, with buyers whose preferences satisfy the axioms of Yaari{\textquoteright}s (1987) dual theory of choice under risk. It is shown that when their valuations are independently and identically distributed, buyers are indifferent among all the auctions contained in a big family of mechanisms that includes the standard auctions. It is also shown that in the linear equilibria of the sealed bid double auction, as the degree of players{\textquoteright} risk aversion grows arbitrarily large, the ex post inefficiency of the mechanism tends to vanish. JEL Classification Numbers: D44; D81},
  url      = {/files/dp193.pdf}
}
@booklet {avitalmoshinsky-wd1hf-iaoieacafahb1999,
title = {Where Did 1850 Happen First - in America or in Europe? A Cognitive Account for an Historical Bias},
journal = {Discussion Papers},
number = {208},
year = {1999},
month = {12},
publisher = {Psychological Science 13(1) (2002), 20-25},
abstract = {Teachers of history note that pupils are often surprised to learn that a certain event in Europe happened at the same time as another in America, since to them the latter appears to have happened more recently. The validity of this anecdotal observation is supported by an experiment. This bias is explained by noting that America is The New World, while Europe is The Old World. Independent verification is offerred for this explanation. It is shown that the accessibility principle (i.e. that better known events appear more recent than less well known ones) neither accounts for the bias nor even operates with our stimuli.{\textquoteright}An earlier version of this paper, titled The Europe-America Bias:{\textquoteright} Where a historical event occurred affects when people think it occurred, was published later,{\textquoteright}in 2005, in Advances in Psychology Research (S. P. Shohov, ed.), vol. 33, 39-63.{\textquoteright} It is fuller, and in the link todp208 it{\textquoteright}follows the version published in Psychological Science in 2002.},
url = {/files/dp208.pdf},
author = {Avital Moshinsky and Maya Bar-Hillel}
}
@booklet {shirrinkagoubitz-afsibasouf1998,
title = {Age-Related Flower Sampling in Bumblebees: A Survey of Unsuccessful Foragers},
journal = {Discussion Papers},
number = {172},
year = {1998},
month = {5},
publisher = {Entomologia Generalis 29 (2007), 201-211},
abstract = {Naive bumble bees (Bombus terrestris) that did not learn to handle artificial flowers were examined for sampling frequency and duration before giving up. The munber of sample-bouts and the time of each sample-bout were measured, as well as the time in between two subsequent sample-bouts (pauses). This flower-sampling behavior of un successful individual bumble bees was related to the age of the bees, cohort and colony size. Younger bees sampled the flowers more frequently but stayed a shorter time each sample-bout than older bees. The duration of each separate pause was longer for older bees as well. The total sampling-time before giving up tended to be higher for the older bees. For all bees the subsequent sample-bouts showed a decrease in duration, while the duration of each subsequent pause increased. This was possibly caused by a negative re-enforcement by the unsuccessful samples. The higher unsuccessful sample-frequency of the younger bees could beconsidered a part ofa first orientation and learning process of flower handling. Therefore this sampling could influence the future behavior of young bees and might result in a higher capability of handling comlex flowers. Finally it should be emphasized that these results are our first prove of age-related learning and it is suggested that research along this line could result in more evidence of age-related foraging behavior.},
author = {Shirrinka Goubitz and Tamar Keasar and Avi Shmida}
}
@booklet {bezalelpeleg-tcefoagfpi-s1998,
title = {Canonical Extensive Form of a Game Form: Part I - Symmetries, The},
journal = {Discussion Papers},
number = {186},
year = {1998},
month = {12},
publisher = {In A. Alkan, C.D. Aliprantis \& N.C. Yannelis (eds.), Current Trends in Economics: Theory and Applications (1999) Springer-Verlag 367-387},
abstract = {Within this series of papers we plan to exhibit to any noncooperative game in strategic or normal form a {\textquoteright}canonical{\textquoteright} representation in extensive form that preserves all symmetries of the game. The operation defined this way will respect the restriction of games to subgames and yield a minimal total rank of the tree involved. Moreover, by the above requirements the {\textquoteright}canonical extensive game form{\textquoteright} will be uniquely defined. Part I is dealing with isomorphisms of game forms and games. An auto- morphism of the game is called motion. A symmetry of a game is a permuta- tion which can be augmented to a motion. Some results on the existence of symmetry groups are presented. The context to the notion of symmetry for coalitional games is exhibited.},
url = {/files/dp186.pdf},
author = {Bezalel Peleg and Joachim Rosenmuller and Peter Sudholter}
}
@booklet {leviatan-cvatcicmgwtt1998,
title = {Consistent Values and the Core in Continuum Market Games with Two Types},
journal = {Discussion Papers},
number = {171},
year = {1998},
month = {4},
publisher = {International Journal of Game Theory 31 (2003), 383-410},
abstract = {The consistent value is an extension of the Shapley value to the class of games with non-transferable utility In this paper, the consistent value will be characterized for market games with a continuum of players of 2 types. We will show that for such games the consistent value need not belong to the core, and conditions under which there is equivalence between the two concepts will be given.},
author = {Sigal Leviatan}
}
@booklet {peleg-ceogwmp1998,
title = {Correlated Equilibria of Games with Many Players},
journal = {Discussion Papers},
number = {185},
year = {1998},
month = {9},
publisher = {International Journal of Game Theory 29 (2000), 375-389},
abstract = {We consider the structure of the set of correlated equilibria for games with a large number n of players. Since the number of equilibrium constraints grows slower than the number of strategy arrays, it might be conjectured that the set of correlated equilibra is large. In this paper we show (1) that the average relative measure of the solution set is smaller than 2^-n, but also (2) that the solution set contains a number c^n of equilibria having disjoint supports with a probability going to I as n grows large. The proof of the latter result hinges on a combinatorial result on the number of nonnegative linear combinations of vectors representing a given point, which may be of independent interest.},
url = {/files/dp185.pdf},
author = {Hans Keiding and Bezalel Peleg}
}
@booklet {albertblarer-dlofsbfbicf1998,
title = {Does Learning of Flower Size by Foraging Bumblebees Involve Concept Formation?},
journal = {Discussion Papers},
number = {177},
year = {1998},
month = {6},
abstract = {Large flowers often contain larger nectar rewards, and receive more pollinator visits, than small flowers. We studied behavioural mechanisms for the formation of flower preferencce in bumblebees in a two-phase laboratory experiment. Flower-naive Bombus terrestris (L.) foraged on artificial flowers that bore either a big (3.8 cm diameter) or a small (2.7 cm diameter) display of a uniform colour. Only flowers of one display size contained nectar rewards. We changed the display colour and the locations of big and small flowers in the second experimental phase. We recorded the bees{\textquoteright} choices in both trials. 41\% of the bees made their first visit to a small flower. The bees learned to associate display size with food reward, and chose rewarding flowers with > 85\% accuracy by the end of each learning trial. Some learning occured within the bees{\textquoteright} first three flower visits. Learning of the size-reward association was equally good for big and small displays in the first trial, but better for small displays in the second trial. Formation of size-reward associations followed a similar course in both trials. This suggests that the bees did not apply their experience from the first learning trial to the new situation of the second trial. Rather, they treated each phase of the experiment as an independent learning trial. We suggest that pollinators from flower-size preferences through associative learning, and that they may not transfer the concept of "flower size" from one situation to another. Implications for the possible evolution of floral displays are discussed.},
author = {Albert Blarer and Tamar Keasar and Avi Shmida}
}
@booklet {robertssimon-eigwiwinaioos1998,
title = {Equilibria in Games with Information Which Is Non-Standard and Incomplete on One Side},
journal = {Discussion Papers},
number = {179},
year = {1998},
month = {8},
abstract = {No Abstract},
author = {Robert S. Simon and Stanislaw Spiez and Henryk Torunczyk}
}
@booklet{solan-ece1998,
  author   = {Eilon Solan},
  title    = {Extensive-Form Correlated Equilibria},
  journal  = {Discussion Papers},
  number   = {175},
  year     = {1998},
  month    = {6},
  abstract = {The paper studies extensive-form correlated equilibria in stochastic games. An extensive-form correlated equilibrium is an equilibrium in an extended game, where a correlation device chooses at every stage, as a function of past signals (but independently of the actions of the players) a private signal for each player. We define the notion of individually rational payoffs for these games, and characterize the set of extensive-form correlated equilibrium payoffs using feasible and individually rational payoffs. Our result implies that extensive-form correlated equilibria and communication equilibria are payoff-equivalent in our model.},
  url      = {/files/dp175.pdf}
}
@booklet{bergman-gropofdwoud1998,
  author   = {Yaacov Z. Bergman},
  title    = {General Restrictions on Prices of Financial Derivatives Written on Underlying Diffusions},
  journal  = {Discussion Papers},
  number   = {164},
  year     = {1998},
  month    = {1},
  abstract = {It is shown that in any diffusive one-factor model of the term structure, the prices of bonds and of term structure puts decrease as the short-term interest rate increases. However, these prices need not be monotone in the short-term rate, if that rate can experience jumps. An important comparative statics implication of the monotonicity resuly for diffusive models is that to a higher short-term interest rate corresponds a yield curve that lies uniformly above the curve that corresponds to a lower short-term rate. Furthermore, if the diffusion that describes the short-term rate is also homogeneous, then two yield curves that are measured at dfferent dates cannot intersect when drawn from the same time origin. If empirically they do intersect, then the short-term rate cannot be described by a one-factor homogeneous diffusion. It is also shown that if the second partial derivative w.r.t. to the short-term interest rate of the drift of the one-factor diffusion describing that rate is less than or equal to 2 - special cases being the linear drift models-then the prices of deterministic-coupon bonds and term structure puts are convex in that rate. The last result is derived using probabilistic representations of solutions to parabolic partial differential equations. The same methodology is used to derive restrictions on prices of European, American, and Asian options when the underlying price follows a stochastic volatility diffusion. Bounds, asymptotic results, and representations are derived for different linear differential transformations of derivative price functions like option{\textquoteleft}s delta, rho, and theta. An example from these results is the fact that the rho of a European call written on a stochastic volatility underlying asset is equal to the price of a digital call with the same exercise price, the same time to expiration, and the same underlying asset as the call, multiplied by the time to expiration and by the exercise price. The methodology is described in sufficient detail to allow for its ready application in a variety of situations.},
  url      = {/files/dp164.pdf}
}
@booklet {winter-gnb1998,
title = {Gradual Nash Bargaining},
journal = {Discussion Papers},
number = {165},
year = {1998},
month = {2},
publisher = {Published in ``Bargaining with an Agenda'' Games and Economic Behavior 48 (2004), 139-153},
abstract = {We propose a model of gradual bargaining in the spirit of the Nash axiomatic theory. In this model the underlying set of payoff opportunities expands continuously with time. Unlike Nash{\textquoteleft}s solution, that predicts a single agreement for each bargaining problem, our solution yields a continuous path of agreements - one for each point in time. It emerges from a simple and intuitive differential equation. We discuss the relationship between the gradual solution and the Nash solution, and characterize it axiomatically by using essentially one property, which is Invariance with Respect to Increasing Transformations. We intetrpret this property as an incentive compatibility requirement. By using the richer framework of gradual bargaining, our aproach avoids some of the shortcomings of Nash{\textquoteleft}s axiomatization. In particular we do not need the controversial axiom of the ILA and the sets of payoff opportunities need not be convex. In the spirit of the Nash Program we propose several non-cooperative bargaining models that sustain our solution. Finally, we apply our model to discuss the allocation of physical (or monetary) assets when individuals{\textquoteleft} risk aversion changes over time.},
url = {/files/dp165.pdf},
author = {Zvi Wiener and Eyal Winter}
}
@booklet {tamarkeasar-lpofbdmoiti1998,
title = {Learning Performance of Foraging Bees During Manipulation of Inter-Visit Time Intervals},
journal = {Discussion Papers},
number = {176},
year = {1998},
month = {6},
publisher = {Entomologia Generalis 29 (2007), 213-224},
abstract = {It has been repeatedly suggested that bees use short-term information for making food-choice decisions. According to this hypothesis, the elimination of such information should reduce bees{\textquoteright} performance in learning tasks. Naive bumblebees, foraging on differentially-rewarding artificial flowers, were exposed to either 1.5 s or 15 s of darkness between foraging visits. These treatments were intended to diminish the amount of short-term information for the bees{\textquoteright} next foraging choice. The bees{\textquoteright} flower choices were compared to the choices of untreated controls. Control-treatment bees chose rewarding flowers significantly more often then short-darkness (1.5 s) bees. Shifts between flowers of different colors were more frequent during long inter-visit intervals than during short inter-visit intervals in the control treatment, but not in the darkness treatments. The results suggest that short-term experience, when available, improves the choice performance of bees. However, possible effects of darkness itself on decision-making were not controlled for, and require further study.},
author = {Tamar Keasar and Inbal Fershtman and Rivka Forotan and Avi Shmida}
}
@booklet {haimanko-nvonamg1998,
title = {Non-Symmetric Values of Non-Atomic and Mixed Games},
journal = {Discussion Papers},
number = {168},
year = {1998},
month = {3},
publisher = {Mathematics of Operations Research 25 (2000), 591-605},
abstract = {This paper presents a new unifying approach to the study of nonsymmetric (or quasi-) values of nonatomic and mixed games. A family of path values is defined, using an appropriate generalization of Mertens diagonal formula. A path value possesses the following intuitive description: consider a function (path) gamma attaching to each player a distribution function on [0; 1]. We think of players as arriving randomly and independently to a meeting when the arrival time of a player is distributed according to gamma. Each player{\textquoteright}s payoff is defined as his marginal contribution to the coalition of players that have arrived earlier. Under certain conditions on a path, different subspaces of mixed games (pNA; pM; bv{\textquoteright}FL) are shown to be in the domain of the path value. The family of path values turns out to be very wide - we show that on pNA; pM and their subspaces the path values are essentially the basic construction blocks (extreme points) of quasi-values.},
author = {Ori Haimanko}
}
@booklet {ullmann-margalit-onwtk1998,
title = {On Not Wanting to Know},
journal = {Discussion Papers},
number = {174},
year = {1998},
month = {6},
publisher = {In Edna Ullmann-Margalit (ed.), Reasoning Practically, New York: Oxford University Press, 2000, 72-84},
abstract = {A common assumption of practical reasoning is that, in order to act rationally, agents are to act on the basis of the totality of evidence available to them. Common practice and introspection, however, suggest that people often do not want to know. The paper explores various aspects of the phenomenon of not wanting to know in an attempt to find out whether it is inherently unreasonable. The exploration leads, first, to weakening the principle of total evidence through replacing it with a rebuttable presumption in favor of additional knowledge. The sustainability of this presumption is then examined in light of the large variety of circumstances in which it seems to be reasonably rebutted. The alternative which in the end is recommended is to give up both the general principle and the presumption, and adopt instead something like a case by case cost-benefit approach, where the value of additional knowledge is matched up against its cost. In the process, the key notions of available knowledge, the value of knowledge, and the cost of knowledge are elucidated; also, separate attention is given to the question whether not wanting to know may sometimes be argued to be either morally required or morally reprehensible.},
url = {/files/db174.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {haimanko-psv1998,
title = {Partially Symmetric Values},
journal = {Discussion Papers},
number = {169},
year = {1998},
month = {3},
publisher = {Mathematics of Operations Research 25 (2000), 573-590},
abstract = {We investigate values of differentiable non-atomic and mixed games, in the situation where there are several types of players, and replacements are allowed only within each type. We show that if the types are considerably large, then the values are the path values and their mixtures (i.e., the path is random). In particular, the symmetric values on pM are characterized, as mixtures of values defined in [Hart (1973)].},
author = {Ori Haimanko}
}
@booklet {gneezy-pcbt1998,
title = {Price Competition Between Teams},
journal = {Discussion Papers},
number = {184},
year = {1998},
month = {9},
publisher = {Experimental Economics 5 (2002), 29-38.},
abstract = {Economic agents (e.g., firms, corporations) are often treated as unitary players. The internal organization of these agents and, in particular, the possibility of conflicting interests within agents, is overlooked. The present study uses an experimental approach to examine whether market performance is sensitive to the violation of the unitary player assumption. Toward this goal, we modeled a duopolistic market as a team game involving two teams with three members in each team. Each player simultaneously demanded a price and the team whose total demand was lower won the competition and was paid its price. The losing team was paid nothing. In case of a tie, each team was paid half its price. This composite duopoly was studied under two conditions; one in which the team{\textquoteright}s profit was divided equally amongst its members (and, hence, each team could be considered a unitary player) and another in which each individual member was paid her own price. Based on the reinforcement learning principle as modeled by Roth and Erev (1995), we predicted that convergence to the competitive price would be much faster in the former treatment than in the latter. The experimental results strongly confirmed this prediction.},
url = {/files/dp184.pdf},
author = {Gary Bornstein and Uri Gneezy}
}
@booklet {mottyperry-asatmtea1998,
title = {A Sealed-Bid Auction That Matches the English Auction},
journal = {Discussion Papers},
number = {181},
year = {1998},
month = {8},
publisher = {Games and Economic Behavior 33 (2000), 265-273.},
abstract = {This paper analyzes a two-stage sealed-bid auction that is frequently employed in privatization, takeover, and merger and acquisition contests. This auction format yields the same expected revenue as the open ascending (English) auction, yet is less susceptible to preemptive bidding and collusion.},
url = {/files/dp181.pdf},
author = {Motty Perry and Elmar Wolfstetter and Shmuel Zamir}
}
@booklet {ullmann-margalit-sd1998,
title = {Second-Order Decisions},
journal = {Discussion Papers},
number = {178},
year = {1998},
month = {7},
publisher = {Ethics 110 (1999), 5-31.},
abstract = {People are often reluctant to make decisions by calculating the costs and benefits of alternative courses of action in particular cases. Knowing, in addition, that they may err, people and institutions often resort to second-order strategies for reducing the burdens of, and risk of error in, first-order decisions. They make a second-order decision when they choose one from among such possible strategies. They adopt rules of presumptions; they create standards; they delegate authority to others; they take small steps; they pick rather than choose. Some of these strategies impose high costs before decision but low costs at the time of ultimate decision; others impose low costs both before and at the time of ultimate decision; still others impose low costs before decision while exporting to others the high costs at the time of decision. We assess these second-order strategies and provide grounds for choosing among them in both legal and nonlegal contexts, by exploring the extent to which they minimize the overall costs of decision and costs of error. We also attempt to cast light on political, legal, and ethical issues raised by second-order decisions.},
url = {/files/dp178.pdf},
author = {Cass R. Sunstein and Edna Ullmann-Margalit}
}
@booklet {mas-colell-asapltceodp1998,
title = {A Simple Adaptive Procedure Leading to Correlated Equilibrium (revision of Discussion Paper $\#$126)},
journal = {Discussion Papers},
number = {166},
year = {1998},
month = {3},
publisher = {Econometrica 68 (2000), 1127-1150},
abstract = {We propose a new and simple adaptive procedure for playing a game: "regret-matching." In this procedure, players depart from their current play with probabilities that are proportional to measures of regret for not having used other strategies in the past. It is shown that our adaptive procedure guarantees that, with probability one, the empirical distributions of play converge to the set of correlated equilibria of the game. To compute these regret measures, a player needs to know his payoff function and the history of play. We also offer a variation where every player knows only his own realized payoff history (but not his payoff function).},
url = {/files/adapt.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {guth-ssaesishtcoes1998,
title = {Species Survival and Evolutionary Stability in Sustainable Habitats: The Concept of Ecological Stability},
journal = {Discussion Papers},
number = {180},
year = {1998},
month = {6},
publisher = {Journal of Evolutionary Economics 10 (2000), 437-447},
abstract = {Whoever exists belongs to a species, which did not become extinct, has a (geno-)type, which should be well adjusted, and lives in a habitat which has been sustainable for a long time. To capture the first aspect we allow for interspecies competition and analyze the conditions for species survival. The second aspect refers to success in intraspecies competition of (geno-)types as in evolutionary biology and game theory. Survival in inter- and intraspecies competition together with sustainability define ecological stability, a concept which we illustrate by an example of solitary and social grazers who compete for food supply and who are endangered by the same predators. Although our approach is inspired by empirical evidence, no systematic attempt is made to apply it to some specific ecology.},
url = {/files/dp180.pdf},
author = {Robert Aumann and Werner Guth}
}
@booklet {mayabar-hillel-ttcpas1998,
title = {Torah Codes: Puzzle and Solution, The},
journal = {Discussion Papers},
number = {163},
year = {1998},
month = {1},
publisher = {Chance 11 (1998), 13-19},
abstract = {In 1994, Statistical Science published astonishing statistical evidence proving the existence of a hidden code in the book of Genesis, relating to future events. New research deprives this evidence of its import by proving that the same code can be found in the Hebrew translation of War and Peace.},
url = {/files/dp163.pdf},
author = {Maya Bar-Hillel and Dror Bar-Natan and Brendan McKay}
}
@booklet {gilkalai-ttfrehsits1998,
title = {Two Famous Rabbis Experiments: How Similar Is Too Similar?, The},
journal = {Discussion Papers},
number = {182},
year = {1998},
month = {9},
abstract = {Witztum, Rips and Rosenberg describe the outcomes of two experiments which purport to statistically prove the existence of a hidden code in the Book of Genesis. We show that these two experiments, viewed as two random samples from the same population, yielded numerical outcomes which are more similar to each other than expected. We also show that the distributions obtained in some control experiments performed by Witztum et al. are flatter than expected. Our hypothesis is that Witztum et al. tailored their experimental procedures to meet naive expectations regarding how outcomes of experimental replication and experimental controls should look. We give some statistical and empirical evidence supporting this hypothesis.},
url = {/files/dp182.pdf},
author = {Gil Kalai and Brendan McKay and Maya Bar-Hillel}
}
@booklet {okada-trgwfa1998,
title = {Two-Person Repeated Games with Finite Automata},
journal = {Discussion Papers},
number = {173},
year = {1998},
month = {5},
publisher = {International Journal of Game Theory 29 (2000), 309-325.},
abstract = {We study two-person repeated games in which a player with a restricted set of strategies plays against an unrestricted player. An exogenously given bound on the complexity of strategies, which is measured by the size of the smallest automata that implement them, gives rise to a restriction on strategies available to a player. We examine the asymptotic behavior of the set of equilibrium payoffs as the bound on the strategic complexity of the restricted player tends to infinity, but sufficiently slowly. Results from the study of zero sum case provide the individually rational payoff levels. In addition we will explicitly construct the punishment strategy of the unrestricted player with certain uniform properties.},
url = {/files/dp173.pdf},
author = {Abraham Neyman and Daijiro Okada}
}
@booklet {haimanko-vtws1998,
title = {Value Theory Without Symmetry},
journal = {Discussion Papers},
number = {167},
year = {1998},
month = {3},
publisher = {International Journal of Game Theory 29 (2000), 451-468.},
abstract = {We investigate the non-symmetric values of finite games on a given, possibly infinite, universe of players. It turns out that in the case of values symmetric with respect to some coalitional structure with infinite elements (types), the axioms are powerful enough to force such a value to be a mixture of the random arrival values (or path values in the sense of [Owen (1973)]), with identically distributed random arrival times of players inside the same type. The general non-symmetric values are shown to be the random order values (as in [Weber (1988)] for a finite universe). The non-symmetric semivalues and those symmetric with respect to a coalitional structure with large types are also completely characterized.},
url = {/files/dp167.pdf},
author = {Ori Haimanko}
}
@booklet {neyman-vonvmg1998,
title = {Values of Non-Atomic Vector Measure Games},
journal = {Discussion Papers},
number = {183},
year = {1998},
month = {11},
publisher = {Israel Journal of Mathematics 124 (2001), 1-27.},
abstract = {There is a value (of norm one) on the closed space of games that is generated by all games of bounded variation $f \circ \mu$ where $\mu$ is a vector of non-atomic probability measures and $f$ is continuous at $0 = \mu(\varnothing)$ and at $\mu(I)$.},
url = {/files/dp183.pdf},
author = {Abraham Neyman}
}
@booklet {salvadorbarbera-vfvamoee1998,
title = {Voting for Voters: A Model of Electoral Evolution},
journal = {Discussion Papers},
number = {170},
year = {1998},
month = {4},
abstract = {We model the decision problems faced by the members of societies whose new members are determined by vote. We adopt a number of simplifying assumptions: the founders and the candidates are fixed; the society operates for k periods and holds elections at the beginning of each period; one vote is sufficient for admission, and voters can support as many candidates as they wish; voters assess the value of the streams of agents with whom they share the society, while they belong to it. In spite of these simplifications, we show that interesting strategic behavior is implied by the dynamic structure of the problem: the vote for friends may be postponed, and it may be advantageous to vote for enemies. We discuss the existence of different types of equilibria in pure strategies and point out interesting equilibria in mixed strategies.},
url = {/files/dp170.pdf},
author = {Salvador Barbera and Michael Maschler and Jonathan Shalev}
}
@booklet {solan-3rgwas1997,
title = {3-Person Repeated Games with Absorbing States},
journal = {Discussion Papers},
number = {128},
year = {1997},
month = {1},
abstract = {Every 3-person repeated game with absorbing states has an equilibrium payoff.},
author = {Eilon Solan}
}
@booklet{peleg-aaeidsac1997,
  title     = {Almost All Equilibria in Dominant Strategies Are Coalition-Proof},
  journal   = {Discussion Papers},
  number    = {156},
  year      = {1997},
  month     = {9},
  publisher = {Economics Letters 60 (1998), 157-162},
  abstract  = {Almost all equilibria in dominant strategies of finite strategic games are coalition-proof.},
  url       = {/files/dp156.pdf},
  author    = {Bezalel Peleg}
}
@booklet {peleg-aaotwciids1997,
title = {An Axiomatization of the Walras Correspondence in Infinite Dimensional Spaces},
journal = {Discussion Papers},
number = {131},
year = {1997},
month = {2},
publisher = {International Economic Review 38 (1997), 853-864. Also In: The Legacy of Leon Walras, Vol. 2, Intellectual Legacies in Modern Economics, Vol 7, D. A. Walker (ed.), Elgar Reference Collection (2001), 618-629},
abstract = {This paper presents a generalization of the results of van den Nouweland, Peleg and Tijs on the axiomatization of the Walras correspondence to generalized (pure exchange) economies where the commodity space is the positive cone in an ordered locally convex topological vector space. Our main result characterizes the Walras correspondence completely over an "acceptable" class of economies in terms of consistency, converse consistency, and weak versions of Pareto optimality and non-emptiness. Important examples of economies that are "acceptable" are given in detail.},
url = {/files/dp131.pdf},
author = {Mukul Majumdar and Bezalel Peleg}
}
@booklet {neyman-cirgwtnosinckodp1997,
title = {Cooperation in Repeated Games When the Number of Stages Is Not Commonly Known (revision of Discussion Paper $\#$65)},
journal = {Discussion Papers},
number = {162},
year = {1997},
month = {1},
publisher = {Econometrica 67 (1999), 45-64.},
author = {Abraham Neyman}
}
@booklet {pitowsky-cpatgollip1997,
title = {Correlation Polytopes and the Geometry of Limit Laws in Probability},
journal = {Discussion Papers},
number = {146},
year = {1997},
month = {6},
author = {Itamar Pitowsky}
}
@booklet{samet-ciw1997,
  title    = {Counterfactuals in Wonderland},
  journal  = {Discussion Papers},
  number   = {134},
  year     = {1997},
  month    = {3},
  abstract = {The literary source of the main ideas in Aumann{\textquoteright}s article "Backward Induction and Common Knowledge of Rationality" is exposed and analyzed. The primordial archetypal images that underlie both this literary source and Aumann{\textquoteright}s work are delineated and are used to explain the great emotive impact that this work had on the community of game theorists.},
  url      = {/files/dp134.pdf},
  author   = {Dov Samet}
}
@booklet {simon-tdbckofas1997,
title = {Difference Between Common Knowledge of Formulas and Sets, The},
journal = {Discussion Papers},
number = {141},
year = {1997},
month = {5},
publisher = {International Journal of Game Theory 28 (1999), 367-384.},
abstract = {This article concerns the interactive modal propositional calculus, using the multi-agent epistemic logic S5. With regard to the space \% of maximally consistent sets of formulas, the knowledge of an agent is defined by its knowledge of a set of formulas. Common knowledge can be defined in at least two ways, as the common knowledge of a set of formulas or according to the meet partition generated by the knowledge partitions of the agents. With at least two agents, this meet partition is a much finer partition of \% than that generated by the common knowledge of sets of formulas, yet for some points of \% the two partition members coincide. Whether the two partition members coincide has radical implications for the structure of the meet partition members.},
url = {/files/dp141.pdf},
author = {Robert S. Simon}
}
@booklet {levy-edniia1997,
title = {Efficiency Does Not Imply Immediate Agreement},
journal = {Discussion Papers},
number = {153},
year = {1997},
month = {7},
publisher = {Econometrica 67 (1999), 909-912},
abstract = {Gul (1989) introduces a non-cooperative bargaining procedure and claims that the payoffs of the resulting efficient stationary subgame perfect equilibria are close to the Shapley value of the underlying transferable utility game (when the discount factor is close to 1). We exhibit here an example showing that efficiency, even for strictly super-additive games, does not imply that all meetings end in agreement. Thus efficiency does not suffice to get Gul{\textquoteright}s result.},
url = {/files/gul.html},
author = {Sergiu Hart and Zohar Levy}
}
@booklet {perry-emd1997,
title = {Efficient Mechanism Design},
journal = {Discussion Papers},
number = {133},
year = {1997},
month = {3},
abstract = {We study Bayesian mechanism design in the context of multidimensional types and quasi-linear preferences. We first show that any two incentive compatible mechanisms which implement the same allocation rule must be payoff equivalent up to an additive constant. This result is then applied to study multiple object auctions. We show that the Vickrey-Clarke-Groves auction maximizes the seller{\textquoteright}s expected revenue among all efficient auctions.},
url = {/files/dp133.pdf},
author = {Vijay Krishna and Motty Perry}
}
@booklet {zamir-aewubiace1997,
title = {An Experiment with Ultimatum Bargaining in a Changing Environment},
journal = {Discussion Papers},
number = {159},
year = {1997},
month = {12},
publisher = {Published in {\textquotedblleft}Bargaining with an Agenda{\textquotedblright} Games and Economic Behavior 48 (2004), 139-153},
abstract = {We have obtained experimental results on the ultimatum bargaining game that support an evolutionary explanation for subjects{\textquoteright} behavior in the game. In these experiments we have created environments in which subjects interact with each other in addition to interacting with virtual players, i.e. computer programs with pre-specified strategies. Some of these virtual players were designed to play the equitable allocation, while others exhibited behavior closer to the subgame-perfect equilibrium, in which the proposer{\textquoteright}s share is much larger than that of the responder. We have observed significant differences in the behavior of real subjects depending on the type of "mutants" (virtual players) that were present in their environment.},
url = {/files/dp159.pdf},
author = {Eyal Winter and Shmuel Zamir}
}
@booklet {tang-erotcginfaiol1997,
title = {Experimental Results on The Centipede Game in Normal Form: An Investigation on Learning},
journal = {Discussion Papers},
number = {149},
year = {1997},
month = {6},
publisher = {Journal of Mathematical Psychology 42 (1998), 356-384.},
abstract = {We analyze behavior of an experiment on the repeated centipede game played in the reduced normal form. In this game 2 players decide simultaneously when to split a cake. The longer both players wait, the higher the total gain for both. The player who is less patient to wait obtains the larger share of the pie while the other obtains the lower share of the pie. In all standard game theoretic predictions the outcome is that the pie is split immediately. We compare several static models and quantitative learning models, among them quantal response, reinforcement models and fictitious play. Furthermore, we structure behavior from period to period according to a simple cognitive process, called learning direction theory. It is shown that there is a significant difference in behavior whether a player has observed that he got the larger share of the pie or whether he got the smaller share of the pie.},
url = {/files/dp149.pdf},
author = {Rosemarie Nagel and Fang-Fang Tang}
}
@booklet {reny-otfotlpima1997,
title = {On The Failure of the Linkage Principle in Multi-Unit Auctions},
journal = {Discussion Papers},
number = {157},
year = {1997},
month = {9},
publisher = {Econometrica 67 (1999), 895-900.},
abstract = {It is shown that the linkage principle (Milgrom and Weber (1982)) does not extend to the multi-unit auction setting. An analysis of the equilibrium bidding strategies is carried out for the general two-agent/two-unit Vickrey auction in order to provide economic insight into the nature of the failure. In addition, an explicit counterexample is provided.},
author = {Motty Perry and Philip J. Reny}
}
@booklet {armandogomes-fhbatcf1997,
title = {Finite Horizon Bargaining and the Consistent Field},
journal = {Discussion Papers},
number = {137},
year = {1997},
month = {4},
publisher = {Games and Economic Behavior, 27 (1999), 204-228},
abstract = {This paper explores the relationships between noncooperative bargaining games and the consistent value for non-transferable utility (NTU) cooperative games. A dynamic approach to the consistent value for NTU games is introduced: the consistent vector field. The main contribution of the paper is to show that the consistent field is intimately related to the concept of subgame perfection for finite horizon noncooperative bargaining games, as the horizon goes to infinity and the cost of delay goes to zero. The solutions of the dynamic system associated to the consistent field characterize the subgame perfect equilibrium payoffs of the noncooperative bargaining games. We show that for transferable utility, hyperplane and pure bargaining games, the dynamics of the consistent field converge globally to the unique consistent value. However, in the general NTU case, the dynamics of the consistent field can be complex. An example is constructed where the consistent field has cyclic solutions; moreover, the finite horizon subgame perfect equilibria do not approach the consistent value.},
url = {/files/consf.html},
author = {Armando Gomes and Sergiu Hart and Andreu Mas-Colell}
}
@booklet {neyman-frgwfaodp1997,
title = {Finitely Repeated Games with Finite Automata (revision of Discussion Paper $\#$69)},
journal = {Discussion Papers},
number = {161},
year = {1997},
month = {9},
publisher = {Mathematics of Operations Research 23 (1998), 513-552.},
abstract = {The paper studies the implications of bounding the complexity of the strategies players may select, on the set of equilibrium payoffs in repeated games. The complexity of a strategy is measured by the size of the minimal automaton that can implement it. A finite automaton has a finite number of states and an initial state. It prescribes the action to be taken as a function of the current state and a transition function changing the state of the automaton as a function of its current state and the present actions of the other players. The size of an automaton is its number of states. The main results imply in particular that in two person repeated games, the set of equilibrium payoffs of a sequence of such games. G(n)},
url = {/files/dp161.pdf},
author = {Abraham Neyman}
}
@booklet {simon-tgofhick1997,
title = {Generation of Formulas Held in Common Knowledge, The},
journal = {Discussion Papers},
number = {150},
year = {1997},
month = {6},
publisher = {International Journal of Game Theory 30 (2001), 1-18.},
abstract = {This article concerns the interactive modal propositional calculus, using the multi-agent epistemic logic S5. With regard to the space of maximally consistent sets of formulas, the relations between three aspects of common knowledge are investigated: 1) whether common knowledge defined semantically is determined by the set of formulas held in common knowledge, 2) the partial order by inclusion of the sets of formulas that can be held in common knowledge, and 3) the cardinality of a generating set of formulas for those held in common knowledge. Additionally, assuming at least two agents, it is shown that the number of connected components of holding only the tautologies in common knowledge has the cardinality of the continuum.},
url = {/files/dp150.pdf},
author = {Robert S. Simon}
}
@booklet {ullmann-margalit-afwasghmofasoi1997,
title = {{\textquoteright}He Asked for Water and She Gave Him Milk{\textquoteright}: On Fulfillment and Satisfaction of Intentions},
journal = {Discussion Papers},
number = {147},
year = {1997},
month = {6},
publisher = {In L.E. Hahn (ed.) The Library of Living Philosophers 26 (1999) The Philosophy of Donald Davidson 483-496},
abstract = {In this paper I draw a distinction between fulfilling an intention and satisfying it. This distinction enables me to argue that, contrary to what is often assumed, intention is not a purely internal relation. I take this point, which goes against Wittgenstein, to be supportive - in an indirect but principled way - of Davidson{\textquoteright}s causal theory of reasons, or intentions. At the same time, however, the fulfillment/satisfaction distinction seems to allow for the possibility that an intention will be partially determined retroactively, by later events. If I am right that after-facts may indeed constitute, at least in part, the intention with which an action was performed, then this poses a problem for the causal theory of intentions, as well as for ordinary models of rational action.},
url = {/files/dp147.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {brunobassan-dwtkcibr1997,
title = {{\textquoteright}I Don{\textquoteright}t Want to Know !{\textquoteright}: Can It Be Rational?},
journal = {Discussion Papers},
number = {158},
year = {1997},
month = {11},
abstract = {In this paper we will show that the usually accepted principle of decision theory that "the more information the better" seemingly breaks down in strategic contexts. We will show through several examples that almost every situation is conceivable: Information can be beneficial for all the players, or only for the one who receives it, or, less intuitively, just for the one who does not receive it, or it could be bad for both. The only class of games that escapes these seemingly surprising phenomena is the class of zero-sum games, but only under the assumption of common beliefs for the players. We will show that even a minor departure from the assumptions of zero-sum and common beliefs can produce the phenomenon of information-rejection. We will show that these phenomena may appear even in coordination games, where one would expect that public information should facilitate coordination. It should be emphasized that there is here neither a pathology nor a paradox: aside from the particular examples that may merit attention, the message is that in an interactive decision framework with incomplete information, the relevant issue is that of interactive knowledge rather than simply knowledge per se.},
url = {/files/dp158.pdf},
author = {Bruno Bassan and Olivier Gossner and Marco Scarsini and Shmuel Zamir}
}
@booklet {danielrothenstein-iigot1997,
title = {Imperfect Inspection Games Over Time},
journal = {Discussion Papers},
number = {151},
year = {1997},
month = {6},
publisher = {Annals of Operations Research 109, 175-192 (2002)},
abstract = {We consider an inspection game played on a finite time interval. The inspector wishes to detect a violation as soon as possible after it has been made by the operator. The loss to the inspector is assumed to be linear in the duration of the time elapsed between the violation and its detection. The inspection is not observed by the operator unless the inspector calls an alarm. The inspection is imperfect; it has a Type One Error which means that the inspector may call a false alarm (with probability alpha), and a Type Two Error which means that inspection may fail to detect (with probability beta) a violation which did occur. We first solve the game when alpha and beta are fixed and given. Then we consider the more general model in which the error probability alpha is chosen strategically by the inspector and may depend on the time of inspection. This yields two equilibria; one with constant alpha (and beta) and one with alpha increasing in time. The latter cannot be solved analytically. Consequently we solve a numerical example in which the inspection consists of observing a normally distributed signal.},
url = {/files/dp151.pdf},
author = {Daniel Rothenstein}
}
@booklet {peleg-iotcoamp1997,
title = {Implementation of the Core of a Marriage Problem},
journal = {Discussion Papers},
number = {132},
year = {1997},
month = {2},
abstract = {We consider the prosaic system of matching which is specified by the following two common rules: (i) Each woman (man) proposes to at most one man (woman). (ii) A man and a woman marry each other if they propose to each other. We prove that this system implements the correspondence of stable matchings by strong Nash Equilibria. We also find a simple extensive game form which implements the same correspondence by subgame perfect equilibria.},
author = {Bezalel Peleg}
}
@booklet {yaniv-iagbitugagmp1997,
title = {Individual and Group Behavior in the Ultimatum Game: Are Groups More {\textquoteright}Rational{\textquoteright} Players?},
journal = {Discussion Papers},
number = {154},
year = {1997},
month = {9},
publisher = {Experimental Economics 1 (1998), 101-108},
abstract = {This paper reports two ultimatum game experiments comparing the behavior of individuals with that of three-person groups. Group members conducted a short face-to-face discussion in order to decide, as a collective, on a proposed division or on whether to accept or reject a proposal. Both experiments found that groups offered significantly less than individuals. But, as indicated by the low rejection rate in both treatments, groups were also willing to accept less.},
url = {/files/dp154.pdf},
author = {Gary Bornstein and Ilan Yaniv}
}
@booklet {ullmann-margalit-tihatcor1997,
title = {Invisible Hand and the Cunning of Reason, The},
journal = {Discussion Papers},
number = {143},
year = {1997},
month = {5},
publisher = {Social Research 64 (1997), 181-198},
abstract = {This paper traces the ideological career of the notion of the invisible hand, from the 18th century to the 20th. Two main models of invisible-hand explanations are distinguished: the aggregative and the evolutionary. The argument is made that the contemporary use of the idea of the invisible hand by conservatives as against liberals and social planners springs from not distinguishing between these two models. The latter part of the paper draws a comparison between the idea of the invisible hand and Hegel{\textquoteright}s historically-related idea of the cunning of reason.},
url = {/files/db143.pdf},
author = {Edna Ullmann-Margalit}
}
@booklet {solan-mm1997,
title = {(Min Max)$^2$ = Min Max},
journal = {Discussion Papers},
number = {127},
year = {1997},
month = {1},
abstract = {A repeated game with absorbing states is played over the infinite future. A fixed one-shot game is played over and over again. However, for each action combination there is a probability that once it has occurred all future payoffs for the players are constant (that depends on the action combination that caused the "termination"), whatever the players play in the future. Given such a game, we define a modified game, by changing the payoff function. The new daily payoff for each player is the minimum between his expected payoff given the mixed-actions the players play in this stage, and his min-max value of the original game. Clearly the min-max value of the modified game, when the players are restricted to pure strategies (i.e. they cannot lotter between mixed-actions) cannot exceed the min-max value of the original game. We prove that the two values are equal.},
author = {Eilon Solan}
}
@booklet {peleg-namocsf1997,
title = {Nucleoli as Maximizers of Collective Satisfaction Functions},
journal = {Discussion Papers},
number = {129},
year = {1997},
month = {1},
publisher = {Social Choice and Welfare 15 (1998), 383-411},
abstract = {Two preimputations of a given TU game can be compared via the Lorenz order applied to the vectors of satisfactions. One preimputation is {\textquoteleft}socially more desirable{\textquoteright} than the other, if its corresponding vector of satisfactions Lorenz dominates the satisfaction vector with respect to the second preimputation. It is shown that the prenucleolus, the anti-prenucleolus, and the modified nucleolus are maximal in this Lorenz order. Here the modified nucleolus is the unique preimputation which lexicographically minimizes the envies between the coalitions, i.e. the differences of excesses. Recently Sudh{\"o}lter developed this solution concept. Properties of the set of all undominated preimputations, the maximal satisfaction solution, are discussed. A function on the set of preimputations is called collective satisfaction function if it respects the Lorenz order. We prove that both classical nucleoli are unique minimizers of certain {\textquoteleft}weighted Gini inequality indices{\textquoteright}, which are derived from some collective satisfaction functions. For the (pre)nucleolus the function proposed by Kohlberg, who characterized the nucleolus as a solution of a single minimization problem, can be chosen. Finally, a collective satisfaction function is defined such that the modified nucleolus is its unique maximizer.},
url = {/files/db129.pdf},
author = {Peter Sudh{\"o}lter and Bezalel Peleg}
}
@booklet {samuel-cahn-omsr1997,
title = {Optimal Multivariate Stopping Rules},
journal = {Discussion Papers},
number = {135},
year = {1997},
month = {3},
publisher = {Journal of Applied Probability 35 (1998), 693-706},
abstract = {For fixed i let X(i)=(X1(i),...,Xd(i)) be a d-dimensional random vector with some known joint distribution. Here i should be considered a time variable. Let X(i)=1,...,n be a sequence of n independent vectors, where n is the total horizon. In many examples Xj(i) can be thought of as the return to partner j, when there are d2 partners, and one stops with the i-th observation. If the j-th partner alone could decide on a (random) stopping rule t, his goal would be to maximize EXj(t) over all possible stopping rules tn. In the present "multivariate" setup the d partners must however cooperate and stop at the same stopping time t, so as to maximize some agreed upon function h( ) of the individual expected returns. The goal is thus to find a stopping rule t* for which h(EX1(t),...,EXd(t))=h(EX(t)) is maximized. For continuous and monotone h we describe the class of optimal stopping rules t*. With some additional symmetry assumptions we show that the optimal rule is one which (also) maximizes EZt where Zi= Xj(i), and hence has a particularly simple structure. Examples are included, and the results are extended both to the infinite horizon case and to the case when X(1),..., X(n) are dependent. Asymptotic comparisons between the present problem of finding sup h(E X(t)) and the "classical" problem of finding sup Eh( X(t)) are given. Comparisons between the optimal return to the statistician and to a "prophet" are also included. In the present context a "prophet" is someone who can base his (random) choice g on the full sequence X(1),..., X(n), with corresponding return sup h(E X(g)).},
url = {/files/dp135.pdf},
author = {David Assaf and Ester Samuel-Cahn}
}
@booklet {foster-paaoje1997,
title = {Precision and Accuracy of Judgmental Estimation},
journal = {Discussion Papers},
number = {138},
year = {1997},
month = {4},
publisher = {Journal of Behavioral Decision Making 10 (1997), 21-32},
abstract = {Whereas probabilistic calibration has been a central normative concept of accuracy in previous research on interval estimates, we suggest here that normative approaches for the evaluation of judgmental estimates should consider the communicative interaction between the individuals who produce the judgements and those who receive or use them for making decisions. We analyze precision and error in judgement and consider the role of accuracy-informativeness trade-off (Yaniv \& Foster, 1995) in the communication of estimates. The results shed light on puzzling findings reported earlier in the literature concerning the calibration accuracy of subjective confidence intervals.},
url = {/files/dp138.pdf},
author = {Ilan Yaniv and Dean Foster}
}
@booklet {samuel-cahn-parvep1997,
title = {P-Values as Random Variables; Expected P-Values},
journal = {Discussion Papers},
number = {155},
year = {1997},
month = {9},
publisher = {The American Statistician 53 (1999), 326-331},
abstract = {P-values for hypotheses are considered as random variables. Their expected value (EPV) is expressed in a simple form. In simple examples they are directly computable, also under the alternative hypothesis, and in more complicated examples they are easily simulated. Their major advantage is that they do not depend on any significant level. It is suggested that the use of EPV can replace the use of power, which is always significance level dependent. EPV can also be used for comparison of tests when more than one test is available for a given hypothesis. Examples are given, as well as tables which relate significance level and power to EPV. A comparison of the two-sample one-sided Kolmogorov-Smirnov, Mann-Whitney and t tests is included, for a variety of underlying distributions.},
author = {Harold Sackrowitz and Ester Samuel-Cahn}
}
@booklet {solan-rtgwas1997,
title = {Repeated Team Games with Absorbing States},
journal = {Discussion Papers},
number = {152},
year = {1997},
month = {7},
abstract = {Two teams meet every day to play the same matrix game. Every entry in the matrix contains five numbers: a payoff that each player in the first team receives whenever this entry is chosen, a similar payoff for the players of the second team, a probability that once this entry is chosen the game becomes "static", a payoff that each player in the first team receives in each future day if the game becomes "static" by this entry, and a similar payoff for the players of the second team. We prove that every such game has an equilibrium payoff.},
author = {Eilon Solan}
}
@booklet {simon-sojpepftmf1997,
title = {Separation of Joint Plan Equilibrium Payoffs from the Min-Max Functions},
journal = {Discussion Papers},
number = {142},
year = {1997},
month = {5},
publisher = {Games and Economic Behavior 41 (2002), 79-102},
abstract = {This article concerns infinitely repeated and un-discounted two-person non-zero-sum games of incomplete information on one side. Following the spirit of the Folk Theorem it establishes sufficient conditions for the existence of Nash equilibria with payoffs superior to what the players would receive from observable deviation. Examples are presented that show both the difficulty and the desirability for stronger results than those presented here.},
author = {Robert S. Simon}
}
@booklet {mas-colell-asapltce1997,
title = {A Simple Adaptive Procedure Leading to Correlated Equilibrium},
journal = {Discussion Papers},
number = {126},
year = {1997},
month = {1},
publisher = {(revised in dp $\#$166)},
abstract = {We propose a simple adaptive procedure for playing a game. In this procedure, players depart from their current play with probabilities that are proportional to measures of regret for not having used other strategies (these measures are updated every period). It is shown that our adaptive procedure guarantees that with probability one, the sample distributions of play converge to the set of correlated equilibria of the game. To compute these regret measures, a player needs to know his payoff function and the history of play. We also offer a variation where every player knows only his own realized payoff history (but not his payoff function).},
url = {http://adapt.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {maschler-sng1997,
title = {Spanning Network Games},
journal = {Discussion Papers},
number = {144},
year = {1997},
month = {3},
publisher = {International Journal of Game Theory 27 (1998), 467-500.},
abstract = {We study fundamental properties of monotone network enterprises which contain public vertices and have positive and negative costs on edges and vertices. Among the properties studied are the nonemptiness of the core, characterization of nonredundent core constraints, ease of computation of the core and the nucleolus, and cases of decomposition of the core and the nucleolus.},
url = {/files/dp144.pdf},
internal-note = {Author field cleaned during review; the exported field contained a stray trailing token "Howlett," - verify author list against DP 144.},
author = {Daniel Granot and Michael Maschler}
}
@booklet {davidassaf-asvopi1997,
title = {A Statistical Version of Prophet Inequalities},
journal = {Discussion Papers},
number = {136},
year = {1997},
month = {3},
publisher = {The Annals of Statistics 26 (1998), 1190-1197},
abstract = {All classical "prophet inequalities" for independent random variables hold also in the case where only a noise corrupted version of those variables is observable. That is, if the pairs (X1, Z1),...,(Xn,Zn) are independent with arbitrary, known joint distributions, and only the sequence Z1,...,Zn is observable, then all prophet inequalities which hold if the X{\textquoteright}s were directly observable still hold, even though the expected X-values (i.e. the payoffs) for both the prophet and the statistician, will be different. Our model includes, for example, the case when Zi=Xi+Yi, where the Y{\textquoteright}s are any sequence of independent random variables.},
url = {/files/dp136.pdf},
author = {David Assaf and Larry Goldstein and Ester Samuel-Cahn}
}
@booklet {solan-sgw2ns1997,
title = {Stochastic Games with 2 Non-Absorbing States},
journal = {Discussion Papers},
number = {160},
year = {1997},
month = {12},
publisher = {Israel Journal of Mathematics 119 (2000), 29-54.},
abstract = {In the present paper we consider recursive games that satisfy an absorbing property defined by Vieille. We give two sufficient conditions for existence of an equilibrium payoff in such a game, and prove that if the game has at most two non-absorbing states, then at least one of the conditions is satisfied. Using a reduction of Vieille, we conclude that every stochastic game which has at most two non-absorbing states has an equilibrium payoff.},
url = {/files/dp160.pdf},
author = {Eilon Solan}
}
@booklet {neyman-aslolnfnvvsp1997,
title = {A Strong Law of Large Numbers for Nonexpansive Vector Valued Stochastic Processes},
journal = {Discussion Papers},
number = {145},
year = {1997},
month = {5},
publisher = {Israel Journal of Mathematics 111 (1999), 93-108.},
url = {/files/dp145.pdf},
author = {Elon Kohlberg and Abraham Neyman}
}
@booklet{yaniv-wathfajuu1997,
  title     = {Weighting and Trimming: Heuristics for Aggregating Judgments Under Uncertainty},
  journal   = {Discussion Papers},
  number    = {139},
  year      = {1997},
  month     = {4},
  publisher = {Organizational Behavior and Human Decision Processes 69 (1997), 237-249},
  abstract  = {In making major decisions (e.g., about medical treatment, acceptance of manuscripts for publication, or investment), decision makers frequently poll the opinions and subjective estimates of other judges. The aggregation of these opinions is often beset by difficulties. First, decision makers often encounter conflicting subjective estimates. Second, estimates are often expressed with a measure of uncertainty. The decision maker thus needs to reconcile inconsistencies among judgmental estimates and determine their influence on the overall aggregate judgement. In the empirical studies, I examine the idea that weighting and trimming are two important heuristics in the aggregation of opinions under uncertainty. The results from these studies are contrasted with the findings of a normative study using a computer simulation that was designed to assess the objective effects of weighting and trimming operations on the accuracy of estimation.},
  url       = {/files/dp139.pdf},
  author    = {Yaniv, Ilan}
}
@booklet {peleg-wwpms1997,
title = {When Will Payoff Maximization Survive?},
journal = {Discussion Papers},
number = {148},
year = {1997},
month = {6},
publisher = {Journal of Evolutionary Economics 11 (2001), 479-499},
abstract = {Survival of the fittest means that phenotypes behave as if they would maximize reproductive success. An indirect evolutionary analysis allows for stimuli which are not directly related to reproductive success although they affect behavior. One first determines the solution for all possible constellations of stimuli and then the evolutionarily stable stimuli. Our general analysis confirms the special results of former studies that survival of the fittest in case of commonly known stimuli requires either that own success does not depend on other{\textquoteright}s behavior or that other{\textquoteright}s behavior is not influenced by own stimuli. When stimuli are private information one can derive similar necessary conditions for the survival of the fittest.},
url = {/files/dp148.pdf},
author = {Werner Guth and Bezalel Peleg}
}
@booklet {robertjaumann-tad1996,
title = {Absent-Minded Driver, The},
journal = {Discussion Papers},
number = {94},
year = {1996},
month = {1},
publisher = {Games and Economic Behavior 20 (1997), 102-116},
abstract = {The example of the "absent-minded driver" was introduced by Piccione \& Rubinstein [1995] in the context of games and decision problems with imperfect recall. They claim that a "paradox" or "inconsistency" arises when the decision reached at the "planning stage" {\textendash} before the game is played {\textendash} is compared with that at the "action stage" {\textendash} when the game is played. Though the example is provocative and worth having, their analysis is unsound. A careful analysis reveals that while the considerations at the planning and action stages do differ, there is no paradox or inconsistency. 94R. Robert J. Aumann, Sergiu Hart \& Motty Perry, "The Absent-Minded Driver" (Revised, December 1996). The example of the "absent-minded driver" was introduced by Piccione \& Rubinstein [1995] in the context of games and decision problems with imperfect recall. They claim that a "paradox" or "inconsistency" arises when the decision reached at the "planning stage" is compared with that at the "action stage". Though the example is provocative and worth having, their analysis is questionable. A careful analysis reveals that while the considerations at the planning and action stages do differ, there is no paradox or inconsistency.},
url = {http://driver.html},
author = {Robert J. Aumann and Sergiu Hart and Motty Perry}
}
@booklet {driessen-aagaoabpftttcotgbg1996,
title = {An Alternative Game-Theoretic Analysis of a Bankruptcy Problem from the Talmud: The Case of the Greedy Bankruptcy Game},
journal = {Discussion Papers},
number = {93},
year = {1996},
month = {1},
publisher = {In: Game Theory IV, Year 1998 (yearbook Theory of Games and Applications) (Eds. L.A. Petrosjan and V.V. Mazalov) Nova Science Publishers Inc., New York, USA (1998), 45-61},
abstract = {The bankruptcy problem from the Talmud is modelled as a game (in coalitional form with transferable utility) which differs from the "standard bankruptcy game". A non-game theoretic solution to the bankruptcy problem is recovered by two different game theoretic approaches applied to the alternative game. The major game theoretic approach enables to interpret pairwise greedy or modest claims of creditors as largest or smallest core-allocations to creditors in the alternative game. A theory of consistency is elucidated with elementary game theoretic tools and proofs. As a separate topic, the indirect function of the "standard bankruptcy game" is determined and interpreted in an economic manner. The indirect function may be helpful to describe the game itself as well as its core (due to the duality between games and indirect functions).},
author = {Theo S. H. Driessen}
}
@booklet {milchtaich-obipapsneocg1996,
title = {On Backward Induction Paths and Pure Strategy Nash Equilibria of Congestion Games},
journal = {Discussion Papers},
number = {107},
year = {1996},
month = {6},
publisher = {Published As: {\textquotedblleft}Crowding Games Are Sequentially Solvable{\textquotedblright}, International Journal of Game Theory 27 (1998), 501-509},
abstract = {In this note, a congestion game is a noncooperative normal-form game in which the players share a common set of strategies. The payoff a player receives for playing a particular strategy depends only on the total number of players playing that strategy and decreases with that number in a manner which is specific to the particular player. The corresponding sequential move game is the perfect-information extensive-form game in which players choose their plays sequentially rather than simultaneously, and each player knows the plays of the previous players. We show that the backward induction path of this game is a pure-strategy Nash equilibrium of the simultaneous move game. We also show that, by changing the order of movers in the sequential move game, every pure-strategy Nash equilibrium of the simultaneous move game that is not Pareto dominated by another equilibrium can be obtained.},
author = {Igal Milchtaich}
}
@booklet {aumann-tcottw1996,
title = {Case of the Three Widows, The},
journal = {Discussion Papers},
number = {102},
year = {1996},
month = {6},
publisher = {Moriah 22,3-4, Tevet 5759 (January 1999) 98-107},
abstract = {Part I of a non-technical account, written in Hebrew for the Rabbinic Community, of "Game Theoretic Analysis of a Bankruptcy Problem from the Talmud", by R. Aumann and M. Maschler, Journal of Economic Theory 36 (1985), 195-213. The Talmudic passage in question is explained in more detail than in the JET paper, and additional Talmudic sources are adduced.},
author = {Robert J. Aumann}
}
@booklet{amitai-cwiiobs1996,
  title    = {Cheap-Talk with Incomplete Information on Both Sides},
  journal  = {Discussion Papers},
  number   = {90},
  year     = {1996},
  month    = {1},
  abstract = {We provide a characterization of the set of equilibria of two-person cheap-talk games with incomplete information on both sides. Each equilibrium generates a martingale with certain properties and one can obtain an equilibrium from each such martingale. Moreover, the characterization depends on the number of possible messages. It is shown that for every natural number n, there exist equilibrium payoffs that can be obtained only when the number of possible messages is at least n.},
  url      = {/files/dp90.pdf},
  author   = {Mor Amitai}
}
@booklet{amitai-cwrs1996,
  title    = {Cheap-Talk with Random Stopping},
  journal  = {Discussion Papers},
  number   = {91},
  year     = {1996},
  month    = {1},
  abstract = {Cheap-Talk with Random Stopping is a Cheap-Talk game in which after each period of communication, with probability 1- \%, the talk ends and the players play the original game (i.e., choose actions and receive payoffs). In this paper the relations between Cheap-Talk games and Cheap-Talk with Random Stopping are analyzed.},
  url      = {/files/dp91.pdf},
  author   = {Mor Amitai}
}
@booklet{gossner-cois1996,
  title     = {Comparison of Information Structures},
  journal   = {Discussion Papers},
  number    = {116},
  year      = {1996},
  month     = {9},
  publisher = {Games and Economic Behavior 30 (2000), 44-63.},
  abstract  = {We introduce two ways of comparing two information structures, say I and J. First, I is richer than J when for every compact game G, all correlated equilibrium distributions of G induced by J are also induced by I. Second, J is faithfully reproducible from I when all the players can compute from their information in the I "new information" that reproduces what they could have from J. We prove that I is richer than J if and only if J is faithfully reproducible from I.},
  url       = {/files/dp116.pdf},
  author    = {Olivier Gossner}
}
@booklet{el-yaniv-csfofpas1996,
  title     = {Competitive Solutions for Online Financial Problems: A Survey},
  journal   = {Discussion Papers},
  number    = {111},
  year      = {1996},
  month     = {8},
  publisher = {ACM Computing Surveys 30 (1998), 28-69.},
  abstract  = {This paper surveys results concerning online algorithms for solving problems related to the management of money and other assets. In particular, the survey focuses on search, replacement and portfolio selection problems.},
  url       = {/files/dp111.pdf},
  author    = {Ran El-Yaniv}
}
@booklet {garybornstein-ciinatgoc1996,
title = {Cooperation in Intergroup, N-Person and Two-Person Games of Chicken},
journal = {Discussion Papers},
number = {96},
year = {1996},
month = {1},
publisher = {Journal of Conflict Resolution 41 (1997), 384-406},
abstract = {This paper introduces a new team game where players are engaged in simultaneous games of Chicken between and within teams. The intergroup Chicken game is proposed as a model of intergroup confrontations (e.g., military conflicts, industrial disputes) involving bilateral threats where a failure on the part of either side to yield leads to an outcome (e.g., war, strike) that is disastrous to both sides. We report an experiment in which an intergroup Chicken game with two players in each team was compared with a two-person Chicken and a (single-group) four-person Chicken. The games were played repeatedly and each round was preceded by a pre-game period in which players could signal their intention to cooperate or not. Our interest was in assessing the ability of the participants in the different games to cooperate, i.e., achieve the coordination necessary for the optimal realization of their mutual interests. We found that subjects were considerably less cooperative in the inter-group Chicken game than in either the two-person or the four-person game. Since the coordination problem in the intergroup game is of the same magnitude as that in the four-person game, we attribute most of the competitiveness observed in the intergroup conflict to the strategic properties of the game rather than the number of players involved.},
url = {/files/dp96.pdf},
author = {Gary Bornstein and David Budescu and Shmuel Zamir}
}
@booklet{peleg-adwnpapoasc1996,
  title     = {A Difficulty with Nash{\textquoteright}s Program: A Proof of a Special Case},
  journal   = {Discussion Papers},
  number    = {125},
  year      = {1996},
  month     = {12},
  publisher = {Economics Letters 55 (1997), 305-308},
  abstract  = {Let g be a cooperative game and let N be the set of players of g. According to Nash{\textquoteright}s Program N can find a noncooperative game G such that some Nash equilibrium of G may serve as a solution to g. We show that the implementation of Nash{\textquoteright}s Program might face some difficulties. In this paper we restrict ourselves to finite games. However, we proved in a previous unpublished paper that the same difficulties also appear when infinite games are allowed.},
  url       = {/files/dp125.pdf},
  author    = {Bezalel Peleg}
}
@booklet {peleg-efgfgar1996,
title = {Effectivity Functions, Game Forms, Games, and Rights},
journal = {Discussion Papers},
number = {140},
year = {1996},
month = {1},
publisher = {Social Choice and Welfare 15 (1998), 67-80. Also In: Freedom in Economics, J-F. Laslier, M. Fleurbaey, N. Gravel \& A. Trannoy (eds), Routledge, London (1998), 116-132.},
abstract = {In this paper we offer an axiomatic approach for the investigation of rights by means of game forms. We give a new definition of constitution which consists of three components: the set of rights, the assignment of rights to groups of members of the society, and the distribution of power in the society (as a function of the distribution of rights). Using the foregoing definition we investigate game forms that faithfully represent the distribution of power in the society, and allow the members of the society to exercise their rights simultaneously. Several well-known examples are analyzed in the light of our framework. Finally, we find a connection between Sen{\textquoteright}s minimal liberalism and Maskin{\textquoteright}s result on implementation by Nash equilibria.},
url = {/files/dp140.pdf},
author = {Bezalel Peleg}
}
@booklet {garybornstein-esort1996,
title = {Experimental Study of Repeated Team-Games},
journal = {Discussion Papers},
number = {95},
year = {1996},
month = {1},
publisher = {European Journal of Political Economy 12 (1996), 629-639},
abstract = {We report an experiment in which the Intergroup Prisoner{\textquoteright}s Dilemma (IPD) game was contrasted with a structurally identical (single-group) Prisoner{\textquoteright}s Dilemma (PD). The games were played repeatedly for 40 rounds. We found that subjects were initially more likely to cooperate in the IPD game than in the PD game. However, cooperation rates decreased as the game progressed and, as a result, the differences between the two games disappeared. This pattern is consistent with the hypothesis that subjects learn the structure of the game and adapt their behavior accordingly. Computer simulations based on a simple learning model by Roth \& Erev (1995) support this interpretation.},
url = {/files/dp95.pdf},
author = {Gary Bornstein and Eyal Winter and Harel Goren}
}
@booklet {tamarkeasar-eeifbiebcofr1996,
title = {Exploration Effort in Foraging Bees Is Enhanced by Clustering of Food Resources},
journal = {Discussion Papers},
number = {119},
year = {1996},
month = {11},
abstract = {Foraging can be viewed as a dual activity: a food-collection process, and an exploration process, which enables foragers to collect information on food resources. Exploration of food sources may involve patch sampling, as well as sampling of various food sources within heterogeneous patches. The present study aimed to quantify exploration effort in relation to the spatial distribution of the food sources. Exploration effort was measured in two-stage laboratory experiments on naive bumblebees, Bombus terrestris (L.). In the first stage the bees were allowed to forage on three types of color-distinct artificial flowers. In the second stage a new type of artificial flowers ("exploratory flowers"), which were non-rewarding, was added. The four types of artificial flowers were either arranged in spatially distinct clusters or randomly intermingled. Two reward schedules were used in each spatial arrangement: constant refilling of visited flowers and probabilistic refilling. The bees{\textquoteright} visits to the exploratory flowers were recorded as a measure of exploratory activity, and were related to their previous foraging experience. Bees which experienced a probabilistic reward schedule explored more than bees from the constant-reward treatments. Bees which foraged on clustered flowers directed a larger proportion of their flights to exploratory flowers, and made more visits to these flowers, than bees that foraged on intermingled flowers. This tendency was obtained both in the probabilistic and in the constant reward schedules. The results suggest that bees allocate more effort to the exploration of novel feeding patches than to the exploration of new food types within a known patch.},
author = {Tamar Keasar and Uzi Motro and Avi Shmida}
}
@booklet {michaellandsberger-fawtrovick1996,
title = {First-Price Auctions When the Ranking of Valuations Is Common Knowledge},
journal = {Discussion Papers},
number = {117},
year = {1996},
month = {9},
publisher = {Review of Economic Design 6 (2001), 461-480},
abstract = {We consider an augmented version of the symmetric private value auction model with independent types. The augmentation, intended to illustrate reality, concerns information bidders have about their opponents. To the standard assumption that every bidder knows his type and the distribution of types is common knowledge we added the assumption that the ranking of bidders{\textquoteright} valuations is common knowledge. This set-up induces a particular asymmetric auction model that raises serious technical difficulties. We prove existence and uniqueness of equilibrium in pure strategies in the two bidder case. We also show that the model generally has no analytic solution. If the distribution of valuations is uniform, both bidders bid pointwise more aggressively relative to the standard symmetric case. However, this property does not apply to all distributions of valuations. Finally, we also provide a numerical solution of equilibrium bid functions for the uniform distribution case.},
url = {/files/dp117.pdf},
author = {Michael Landsberger and Jacob Rubinstein and Elmar Wolfstetter and Shmuel Zamir}
}
@booklet {tamarkeasar-faaeaibteopv1996,
title = {Foraging as an Exploratory Activity in Bees: The Effect of Patch Variability},
journal = {Discussion Papers},
number = {118},
year = {1996},
month = {11},
abstract = {Foraging can be viewed as a dual activity: a food-collection process, and an exploration process, which enables foragers to sample and evaluate food resources. The exploratory role of foraging was studied in a series of two-stage laboratory experiments on naive bumblebees. In the first stage of the experiments the bees were allowed to forage on three types of artificial flowers, which were arranged in spatially distinct patches. The mean reward offered by the flowers, the variability in reward among feeding patches and the variability of rewards within patches were varied between experimental treatments. In the second stage a new feeding patch, containing non-rewarding flowers, was added. The bees{\textquoteright} visits to this patch were recorded as a measure of exploratory activity, and were related to their previous foraging experience. Bees which had experienced within-patch reward variability explored the non-rewarding patch more than bees which had not been previously exposed to within-patch variability. On the other hand, variability in rewards between feeding patches led to lower exploration levels than in the control experiments, which had no between-patch variability. Exploration effort was not affected by the mean overall nectar volume offered to the bees. Some visits to the non-rewarding patch were recorded even when the other patches offered high nectar volumes on each foraging visit. Individuals within the same treatment varied considerably in exploration effort. Possible sources of this variation are discussed. We conclude that exploration effort in bees is independent of foraging experience to some extent. On the other hand, it is also affected by the variability of their food sources.},
author = {Tamar Keasar and Uzi Motro and Avi Shmida}
}
@booklet {milchtaich-guoeincg1996,
title = {Generic Uniqueness of Equilibria in Nonatomic Congestion Games},
journal = {Discussion Papers},
number = {97},
year = {1996},
month = {1},
abstract = {Generic uniqueness of pure-strategy Nash equilibrium, and uniqueness of the equilibrium outcome, are proved for a class of noncooperative nonatomic (large) games where a player{\textquoteright}s payoff depends on, and strictly decreases with, the measure of the set of players playing the same (pure) strategy he is playing. If the play of mixed strategies is allowed, then similar results still hold when the assumption of nonatomicity of the measure is removed. Generic uniqueness of the Cournot-Nash equilibrium distribution, corresponding to a description of a game in terms of distribution of player types, is also proved.},
author = {Milchtaich, Igal}
}
@booklet {carmenherrero-iracrtrs1996,
title = {Individual Rights and Collective Responsibility: The Rights-Egalitarian Solution},
journal = {Discussion Papers},
number = {106},
year = {1996},
month = {6},
publisher = {Mathematical Social Sciences 37 (1999), 59-77.},
abstract = {The problem of distributing a given amount of a divisible good among a set of agents which may have individual entitlements is considered here. A solution tothis problem, called the Rights-Egalitarian Solution, is proposed. This allocation rule divides equally among the agents the difference between the aggregate entitlements and the amount of that good available. A relevant feature of the analysis developed is that no sign restriction is established on the parameters of the model (that is, the aggregate entitlements may exceed or fall short of the amount of the good, agents{\textquoteright} rights may be positive or negative, the allocation may involve a redistribution of agents{\textquoteright} holdings, etc.). Several characterizations are provided, and its game theoretic properties are analyzed.},
url = {/files/dp106.pdf},
author = {Herrero, Carmen and Maschler, Michael and Villar, Antonio}
}
@booklet {tamarkeasar-imrifbfdaabrraacwcoft1996,
title = {Innate Movement Rules in Foraging Bees: Flight Distances Are Affected by Recent Rewards and Are Correlated with Choice of Flower Type},
journal = {Discussion Papers},
number = {120},
year = {1996},
month = {11},
publisher = {Behavioral Ecology and Sociobiology 39 (1996), 381-388},
abstract = {The non-random movements patterns of foraging bees are believed to increase their search efficiency. These patterns may be innate, or they may be learned through the bees{\textquoteright} early foraging experience. To identify the innate components of foraging rules, we characterized the flight of naive bumble bees, foraging on non-patchy "field" of randomly scattered artificial flowers with three color displays. The flowers were randomly mixed and all three flower types offered equal nectar volumes. Visited flowers were refilled with probability 0.5 Flight distances, flight durations and nectar probing durations were determined and related to the bees{\textquoteright} recent experiences. The naive bees exhibited area-restricted search behavior, i.e, flew shorter distances following visits to rewarding flowers than after visits to empty flowers. Additionally , flight distances during flower-type transitions were longer than flight distances between flowers of the same type. The two movements rules operated together: flight distances werelongest for flights between flower types following non-rewarding visits, shortest for within-type flights following rewarding visits. An increase in flight displacement during flower-type shifts was also observed in a second experiment, in which all three types were always rewarding. In this experiment, flower-type shifts were also accompanied by an increase in flight duration. Possible relationships between flight distances, flight durations and flower-type choice are discussed.},
url = {/files/dp120.pdf},
author = {Keasar, Tamar and Motro, Uzi and Shmida, Avi}
}
@booklet {el-yaniv-iirtbcotdfotcr1996,
title = {Is It Rational to Be Competitive? On the Decision-Theoretic Foundations of the Competitive Ratio},
journal = {Discussion Papers},
number = {113},
year = {1996},
month = {8},
abstract = {The competitive ratio, a performance measure for online algorithms, or alternatively, a decision making criterion for strict uncertainty conditions, has become a popular and accepted approach within theoretical computer science. This paper closely examines this criterion, both by characterizing it with respect to a set of axioms and in comparison to other known criteria for strict uncertainty.},
author = {El-Yaniv, Ran}
}
@booklet {young-lwhb1996,
title = {Learning with Hazy Beliefs},
journal = {Discussion Papers},
number = {115},
year = {1996},
month = {9},
publisher = {Published As: {\textquotedblleft}Learning, Hypothesis Testing, and Nash Equilibrium{\textquotedblright}, Games and Economic Behavior 25 (2003), 73-96.},
abstract = {Players are rational if they always choose best replies given their beliefs. They are good predictors if the difference between their beliefs and the distribution of the other{\textquoteright}s actual strategies goes to zero over time. Learning is deterministic if beliefs are fully determined by the initial conditions and the observed data. (Bayesian updating is a particular example). If players are rational{\textquoteright} good predictors, and learn deterministically, there are many games for which neither beliefs nor actions converge to a Nash equilibrium. We introduce an alternative approach to learning called prospecting in which players are rational and good predictors, but beliefs have a small random component. In any finite game, and from any initial conditions, prospecting players learn to play arbitrarily close to Nash equilibrium with probability one.},
url = {/files/dp115.pdf},
author = {Foster, Dean P. and Young, H. Peyton}
}
@booklet {tauman-mcwes1996,
title = {Market Crashes Without External Shocks [Revised]},
journal = {Discussion Papers},
number = {124},
year = {1996},
month = {12},
publisher = {Journal of Business 77 (2004), 1-8},
abstract = {It is shown here that market crashes and bubbles can arise without external shocks. Sudden changes in behavior may be the result of endogenous information processing. Except for the daily observation of the market, there is no new information, no communication and no coordination between the participants.},
url = {/files/crash.html},
author = {Hart, Sergiu and Tauman, Yair}
}
@booklet {karp-nocorp1996,
title = {Nearly Optimal Competitive Online Replacement Policies},
journal = {Discussion Papers},
number = {100},
year = {1996},
month = {3},
publisher = {Mathematics of Operations Research 22 (1997), 814-839.},
abstract = {This Paper studies the following online replacement problem. There is a real function f(t), called the flow rate, defined over a finite time horizon [0,T]. It is known that m\% f(t) \% M for some reals 0 \% m < M. At time 0 an online player starts to pay money at the rate of f(0). At each time 0 < t \% T the player may changeover and continue paying money at the rate f(t). The complication is that each such changeover incures some fixed penalty. The player is called online as at each time t the player knows f only over the time interval [0,t]. The goal of the player is to minimize the total cost comprised of cumulative payment flow plus change over costs. This formulation of the replacement problem has various interesting applications among which are: equipment replacement, supplier replacement, the menu cost problem and mortgagere financing. With respect to the competitive ratio performance measure, this paper seeks to determine the best possible competitive ratio achievable by an online replacement policy. Our results include the following: a general lower bound on the performance of any deterministic policy, a policy that is optimal in several special cases and a simple policy that is approximately optimal.},
author = {El-Yaniv, Ran and Karp, Richard M.}
}
@booklet{aumann-anotcg1996,
title = {A Note on the Centipede Game},
journal = {Discussion Papers},
number = {109},
year = {1996},
month = {6},
publisher = {Games and Economic Behavior 23 (1998), 97-105},
abstract = {In Rosenthal{\textquoteright}s Centipede Game, if it is commonly known that the players choose rationally at vertices that are actually reached, then the first player "goes out" at the first move.},
url = {/files/dp109.pdf},
author = {Aumann, Robert J.}
}
@booklet {tamarkeasar-omrofsbbii1996,
title = {Overnight Memory Retention of Foraging Skills by Bumblebees Is Imperfect},
journal = {Discussion Papers},
number = {122},
year = {1996},
month = {11},
publisher = {Animal Behaviour 52 (1996), 95-104},
abstract = {Newly emerged bees learn to forage more efficiently as they gain experience. We hypothesized that foraging efficiency would increase as bees gain experience during the day, but would decrease overnight, due to loss of memory. To test this hypothesis, we allowed naive bombus terretris bumblebees to forage on two clusters of artificial flowers of unequal profitabilities during three consecutive days. Nectar intake rate, percentage visitation to the more profitable cluster, probing time and time intervals between visits were computed as measures of the bees{\textquoteright} foraging efficiency. Nectar intake rates increased significantly during the day, and decreased partially but significantly after a night. There was much variation between individual bees in nectar intake rates. The bees did not show a preference for one of the clusters at the onset of the experiment, and no consistent increase in visitation to the more profitable cluster was found during single observation days for all bees. Most individuals did not visit the higher-reward cluster exclusively by the end of the third day. However, visitation to the higher-reward cluster did increase significantly when the first day of observation was compared to the third day. Preference for the higher-reward cluster increased over the first night but decreased significantly over the second night. Probing time and inter-visit intervals decreased significantly during observation days, and increased significantly after a night. The results indicate that bees learn to approach and probe flowers faster, as they gain experience, during a foraging day, but that these skills are partially forgotten overnight. Patch preference is formed more slowly. Once formed, it is also weakened overnight. Such partial forgetting may aid the bee in reacting quickly to overnight changes in resource profitability by modifying flower choices and handling techniques.},
url = {/files/dp122.pdf},
author = {Keasar, Tamar and Shmida, Avi and Shur, Yoav}
}
@booklet {okada-rgwbe1996,
title = {Repeated Games with Bounded Entropy},
journal = {Discussion Papers},
number = {114},
year = {1996},
month = {9},
publisher = {Games and Economic Behavior 30 (2000), 228-247.},
abstract = {We study the repeated games with a bound on strategic entropy (Neyman and Okada (1996)) of player 1{\textquoteright}s strategy while player 2{\textquoteright}s strategy is unrestricted. The strategic entropy bound will be a function (N) of the number of repetitions N, and hence, so is the maximin value of N((N)) of the repeated game with such bound. Our interest is in the asymptotic behavior of N((N)) (as N ) under the condition the per stage entropy bound, (N)/N where 0. We characterize the asymptotics of N((N)) by a continuous function of . Specifically, it is shown that this function is the concavification of the maximin value of the stage game in which player 1{\textquoteright}s action is restricted to those with entropy at most . We also show that, for infinitely repeated games, if player 1{\textquoteright}s strategies are restricted to those with strategic entropy rate at most , then the maximin value () exists and it, too, equals the concavified function mentioned above evaluated at .},
url = {/files/dp114.pdf},
author = {Neyman, Abraham and Okada, Daijiro}
}
@booklet{amitai-rgwiiobs1996,
title = {Repeated Games with Incomplete Information on Both Sides},
journal = {Discussion Papers},
number = {105},
year = {1996},
month = {6},
abstract = {We analyze the set of equilibria of two-person repeated games with incomplete information on both sides. We show that each equilibrium generates a martingale with certain properties. Moreover, for games, satisfying a certain condition that we call "tightness", it is shown that the converse also holds: each such martingale generates an equilibrium.},
url = {/files/dp105.pdf},
author = {Amitai, Mor}
}
@booklet {gershonben-shakhar-saysftrawyhta1996,
title = {Seek and Ye Shall Find: Test Results Are What You Hypothesize They Are},
journal = {Discussion Papers},
number = {123},
year = {1996},
month = {11},
publisher = {Journal of Behavioral Decision Making 11 (1998), 235-249},
abstract = {Expert clinicians were given batteries of psychodiagnostic test results (Rorshach, TAT, Drow-A-Person, Bender-Gestalt, Wechsler) to analyze. For half, a battery came along with a suggestion that the person suffers from Borderline Personality disorder, and for half - that battery was accompanied by a suggestion that he suffers from Paranoid Personality disorder. In study 1, the suggestion was made indirectly, through a background story that preceded the test results. In study 2, the suggestion was made directly, by the instructions given. The experts saw in the tests what they hypothesized to be there. In particular, the target diagnoses were rated higher when they were hypothesized than when they were not.},
url = {/files/dp123.pdf},
author = {Ben-Shakhar, Gershon and Bar-Hillel, Maya and Bilu, Yoram and Shefler, Gaby}
}
@booklet {weiss-slfmt1996,
title = {Significance Levels for Multiple Tests},
journal = {Discussion Papers},
number = {101},
year = {1996},
month = {3},
publisher = {Statistics and Probability Letters 35 (1997), 43-48},
abstract = {Let X1, ... , Xn be n random variables, with cumulative distribution functions F1, ... , Fn. define \%i := fi(XI) for all i, and let \%(1) \% ... \% \%(n) be the order statistics of the (\%i)i. Let \%1 \% ... \% \%n be n numbers in the interval [0,1]. We show that the probability of the event R := \%\%(i) \% \%i for all 1 \% i \% n \%) is at most mini \%n\%i/i\%. Moreover, this bound is exact: for any given n marginal distributions (Fi)i, there exists a joint distribution with these marginals such that the probability of R is exactly mini \%n\%i/i\%. This result is used in analyzing the significance level of multiple hypotheses testing. In particular, it implies that the R?ger tests dominate all tests with rejection regions of type R as above.},
url = {/files/probs.html},
author = {Hart, Sergiu and Weiss, Benjamin}
}
@booklet {haimovich-tsaivgotenopsarporlp1996,
title = {The Simplex Algorithm Is Very Good!: On the Expected Number of Pivot Steps and Related Properties of Random Linear Programs},
journal = {Discussion Papers},
number = {99},
year = {1996},
month = {2},
abstract = {In their paper How Good is the Simplex Algorithm?, Klee and Minty exhibited a sequence of linear programs for which the number of pivot steps in the simplex algorithm grows exponentially with the dimensions of the program. We present a probabilistic model in which the expected numberof steps for a variant of the simplex method grows linearly with the dimensions. For programs with parallel pair of inequalities (lower and upper bounds), we present a model in which the expected number of steps from minimum to maximum is d. We also present related results concerning the expected complexity of multi-objective linear programming.},
author = {Haimovich, Mordecai}
}
@booklet{israeli-sdoitrg1996,
title = {Sowing Doubt Optimally in Two-Person Repeated Games},
journal = {Discussion Papers},
number = {112},
year = {1996},
month = {8},
publisher = {Games and Economic Behavior 28 (1999), 203-216.},
abstract = {Consider a two-person repeated game (with complete information). Assume that one of the players - say player 1 - has the possibility to sow doubt, in the mind of his opponent, as to what his own (i.e., player 1{\textquoteright}s) payoffs are. This results in a two-person repeated game with incomplete information. It turns out that, by sowing this kind of doubt, a player can increase his minimal equilibrium payoff in the original game. We prove that this minimum is maximal when only one payoff matrix, which is equal to the negative payoff matrix of the opponent, is added. Thus, it is optimal for a player to make his opponent believe that, with some positive probability, he is playing a zero-sum game. We obtain two formulas for calculating this maximal minimum payoff. Finally, we look at the outcome when both players simultaneously sow doubt in this way.},
url = {/files/dp112.pdf},
author = {Israeli, Eitan}
}
@booklet{aumann-otsotaigt1996,
title = {On the State of the Art in Game Theory},
journal = {Discussion Papers},
number = {108},
year = {1996},
month = {6},
publisher = {Games and Economic Behavior 24 (1998), 181-210. Also in W. Albers, W. Guth, P. Hammerstein, B. Moldovanu \& E. van Damme (eds.), Understanding Strategic Interaction, Essays in Honor of R. Selten, (1996) Springer-Verlag 8-34},
abstract = {An interview conducted on June 30, 1995, which is to appear in the Selten Festschrift: Understanding strategic Interaction, edited by Wulf Albers, Werner Guth, Peter Hammerstein, Benny Moldovanu, and Eric van Damme, with the help of Martin Strobel, to be published by Springer in 1996. The interview ranges over a wide variety of topics related to Game Theory, with special emphasis on empirical applications, both of the cooperative and of the noncooperative theories.},
url = {/files/dp108.pdf},
author = {Aumann, Robert J.}
}
@booklet {okada-seacirg1996,
title = {Strategic Entropy and Complexity in Repeated Games},
journal = {Discussion Papers},
number = {104},
year = {1996},
month = {6},
publisher = {Games and Economic Behavior 29 (1999), 191-223.},
abstract = {We study repeated two-person zero-sum games in which one of the players has a restricted set of strategies. Restriction is imposed directly on the set of mixed strategies. To this end, we introduce a notion of entropy for mixed strategies as the means of bounding strategies available to a player. We derive a relation between the two types of strategies in terms of entropy. Using this relation together with certain properties of entropy, we show that if the number of repetitions grows faster than the entropy bound, then an unrestricted player can asymptotically hold a restricted player{\textquoteright}s payoff down to his maximin level in pure actions of the stage game. We consider implications of this result concerning the asymptotic behavior of the value finitely repeated games with finite automata and bounded recall.},
url = {/files/dp104.pdf},
author = {Neyman, Abraham and Okada, Daijiro}
}
@booklet {dansfelsenthal-tvg1996,
title = {Ternary Voting Games},
journal = {Discussion Papers},
number = {98},
year = {1996},
month = {2},
publisher = {International Journal of Game Theory 26 (1997), 335-351},
abstract = {We define ternary voting games (TVGs), a generalization of simple voting games (SVGs). In a play of an SVG each voter has just two options: voting {\textquoteleft}yes{\textquoteright} or {\textquoteleft}no{\textquoteright}. In a TVG a third option is added: abstention. Every SVG can be regarded as a (somewhat degenerate) TVG; but the converse is false. We define appropriate generalizations of the Shapley-Shubik and Banzhaf indices for TVGs. We define also the responsiveness (or degree of democratic participation) of a TVG and determine, for each n, the most responsive TVGs with n voters. We show that these maximally responsive TVGs are more responsive than the corresponding SVGs.},
url = {/files/dp98.pdf},
author = {Felsenthal, Dan S. and Machover, Moshe}
}
@booklet {wolinsky-atotfwnec1996,
title = {A Theory of the Firm with Non-Binding Employment Contracts},
journal = {Discussion Papers},
number = {110},
year = {1996},
month = {7},
publisher = {Econometrica 68 (2000), 875-910.},
abstract = {This paper analyzes a dynamic model of a firm in which the wage of each employee is determined in separate bilateral negotiations with the firm. The contractsbetween the firm and its employees are non-binding in the sense that they can be repeatedly renegotiated to adjust to changing situations. The Bargaining power of an employee stems from the threat of quitting that will deprive the firm of this worker{\textquoteright}s marginal contribution and will put the firm in a weaker position against the remaining workers. This threat is offset to some extent by the replacement opportunities that the firm has, but these are only imperfect in the sense that replacement of quits requires time and effort. The paper characterizes a class of equilibria for this scenario and examines their features. These include a sharp decline of the wage at the firm{\textquoteright}s target employment level, a mark-up of the wage over the employees{\textquoteright} reservation wage and over-employment.},
author = {Wolinsky, Asher}
}
@booklet {el-yaniv-taimcolaa1996,
title = {There Are Infinitely Many Competitive-Optimal Online List Accessing Algorithms},
journal = {Discussion Papers},
number = {103},
year = {1996},
month = {6},
abstract = {This paper presents a new family of optimal, 2-competitive, deterministic online list accessing algorithms. This family includes as members the well known MOVE-TO-FRONT (MTF) algorithm, and the recent, more "conservative" algorithm TIMESTAMP due to Albres.},
author = {El-Yaniv, Ran}
}
@booklet{driessen-teabvagsdtagp1996,
title = {Tree Enterprises and Bankruptcy Ventures: A Game-Theoretic Similarity Due to a Graph-Theoretic Proof},
journal = {Discussion Papers},
number = {92},
year = {1996},
month = {1},
publisher = {Discrete Applied Mathematics 79 (1997), 105-117},
abstract = {In a tree enterprise, users reside at the nodes of the tree and their aim is to connect themselves, directly or indirectly, to the root of the tree. The construction costs of arcs of the tree are given by means of the arc-cost-function associated with the tree. Face to face with this tree enterprise, the bankruptcy venture is described in terms of the estate of the bankrupt concern and the claims of the various creditors. The objective of the paper is to provide conditions (on the claims and the surplus of the claims in the bankruptcy venture) which are sufficient and necessary for the bankruptcy venture to agree with some tree enterprise. It is established that the bankruptcy venture agrees with some tree enterprise if and only if the surplus of claims in the bankruptcy venture is at most the size of the second smallest claim (in the weak sense). For that purpose, both the tree enterprise as well as the bankruptcy venture are modelled as a cooperative game with transferable utility. Within the framework of cooperative game theory, the proof of the equivalence theorem concerning the tree enterprise game and the bankruptcy game, under the given circumstances, is based on graph theoretic tools in a tree structure.},
url = {/files/dp92.pdf},
author = {Driessen, Theo S. H.}
}
@booklet {yonatanbilu-wcvinutfcofbocaf1996,
title = {When Color Vision Is Not Useful: The Floral Choices of Foraging Bumblebees on Color-Polymorphic Artificial Flowers},
journal = {Discussion Papers},
number = {121},
year = {1996},
month = {11},
publisher = {Israel Journal of Plant Sciences 45 (1997), 223-233},
abstract = {Naive bumblebees were allowed to forage on 30 color-polymorphic artificial flowers, which were identical in morphology and reward schedule, but were marked by either a human-blue, human-green or a human-white landing surface. The probability of nectar rewards in the artificial flowers, and their spatial distribution, were manipulated experimentally. The bees{\textquoteright} color choices in the different experimental treatments were compared. The proportions of visits to the three color morphs deviated significantly from the expected random choice (1/3-1/3-1/3) for more than 50\% of the bees. Out of these bees, 38\%, 32\% and 30\% formed a preference for human-blue, human-green and human-white, respectively. The frequency of non-random color choice, and the strength of the deviation from random choice, were highest when the different morphs were placed in separate clusters, lower when they were placed in adjacent clusters, and lowest when they were randomly intermingled. Non-random color choice was also more pronounced when the bees were rewarded according to a constant schedule, rather than probabilistically. A statistically significant preference for human-blue was found during the bees{\textquoteright} first three visits. The bees{\textquoteright} tendency for "runs" of consecutive visits to the same flower color can partially account for their non-random color choices. The specific color preferences of individuals could not be related to their early foraging experiences.},
author = {Bilu, Yonatan and Keasar, Tamar and Motro, Uzi and Shmida, Avi}
}
@booklet {sheshinski-oaeact1995,
title = {On Atmosphere Externality and Corrective Taxes},
journal = {Discussion Papers},
number = {84},
year = {1995},
month = {10},
publisher = {Journal of Public Economics 88 (2004), 727-734},
abstract = {It has been argued that in the presence of an {\textquoteleft}Atmosphere Externality{\textquoteright} and competitive behavior by households, a uniform commodity tax on an externality - generating good attains the first best. It is demonstrated, however, that if income redistribution is desirable then personalized taxes are required for a second-best optimum. Each of these taxes is the sum of a uniform (across households) tax and a component, positive or negative, which depends on the household{\textquoteright}s income and demand elasticities. Second-best optimal indirect taxes and rules for investment in externality-reducing measures are also considered.},
url = {/files/dp84.pdf},
author = {Sheshinski, Eytan}
}
@booklet {mas-colell-bav1995,
title = {Bargaining and Value},
journal = {Discussion Papers},
number = {66},
year = {1995},
month = {1},
publisher = {Econometrica 64 (1996), 357-380},
abstract = {We present and analyze a model of non-cooperative bargaining among n participants, applied to situations describable as games in coalitional form. This leads to a unified theory that has as special cases the Shapley value in the transferable utility case, the Nash bargaining solution in the pure bargaining case, and the recently introduced Maschler-Owen consistent value solution in the general (non-transferable utility) case.},
url = {/files/nbarg.html},
author = {Hart, Sergiu and Mas-Colell, Andreu}
}
@booklet {feinberg-acttat1995,
title = {A Converse to the Agreement Theorem},
journal = {Discussion Papers},
number = {83},
year = {1995},
month = {11},
abstract = {In Aumann (1976) - "Agreeing to Disagree" - it is shown that if there is a common prior then common knowledge of disagreement is impossible. This paper studies the converse proposition. The lack of a common prior is shown to yield common knowledge of disagreement in a variety of cases. However, an example demonstrates that this result cannot be generalized.},
author = {Feinberg, Yossi}
}
@booklet {neyman-citrpdwtnosinck1995,
title = {Cooperation in the Repeated Prisoners{\textquoteright} Dilemma When the Number of Stages Is Not Commonly Known},
journal = {Discussion Papers},
number = {65},
year = {1995},
month = {1},
publisher = {(revised in dp $\#$162)},
abstract = {It has often been observed that cooperative behavior emerges in actual play of the repeated prisoners{\textquoteright} dilemma. This observation seems to be in conflict with the fact that, in any finite repetition of the prisoners{\textquoteright} dilemma, all Nash equilibria (and even all correlated equilibria) lead to the non-cooperative outcome in each stage. In this paper we show that a very small departure from the common knowledge assumption on the number, T, of repetitions already enables cooperation. More generally, with such a departure, any feasible individually-rational outcome of any one-shot game can be approximated by a Nash equilibrium of a finitely-repeated version of that game. The sense in which the departure from common knowledge is "small" is as follows: (i) With probability one, the players know T with precision +- 1. (ii) With probability 1 - \%, the players know T precisely; moreover, this knowledge is mutual to degree \%T. (iii) the deviation of T from its expectation is extremely small.},
author = {Neyman, Abraham}
}
@booklet {neyman-craa1995,
title = {Cooperation, Repetition and Automata},
journal = {Discussion Papers},
number = {88},
year = {1995},
month = {11},
publisher = {In S. Hart \& A. Mas-Colell (eds.), Cooperation: Game-Theoretic Approaches, (1995) Springer-Verlag 233-255},
abstract = {This chapter studies the implications of bounding the complexity of players{\textquoteright} strategies in long term interactions. The complexity of a strategy is measured by the size of the minimal automaton that can implement it. A finite automaton has a finite number of states and an initial state. It prescribes the action to be taken as a function of the current state and its next state is a function of its current state and the actions of the other players. The size of an automaton is its number of states. The results study the equilibrium payoffs per stage of the repeated games when players{\textquoteright} strategies are restricted to those implementable by automata of bounded size.},
author = {Neyman, Abraham}
}
@booklet {jamesasundali-cimegwsp1995,
title = {Coordination in Market Entry Games with Symmetric Players},
journal = {Discussion Papers},
number = {72},
year = {1995},
month = {3},
publisher = {Organizational Behavior and Human Decision Processes 64 (1995), 203-218},
abstract = {We report the results of two experiments designed to study tacit coordination in a class of market entry games with linear payoff functions, binary decisions, and zero entry costs, in which each of n = 20 players must decide on each trial whether or not to enter a market whose capacity is public knowledge. The results show that although the subjects differ considerably from one another in their decision policies, tacit coordination emerges quickly on the aggregate level and is accounted for most successfully by the Nash equilibrium solution for noncooperative n-person games.},
url = {/files/dp72.pdf},
author = {Sundali, James A. and Rapoport, Amnon and Seale, Darryl A.}
}
@booklet {neyman-ceapg1995,
title = {Correlated Equilibrium and Potential Games},
journal = {Discussion Papers},
number = {80},
year = {1995},
month = {7},
publisher = {International Journal of Game Theory 26 (1997), 223-227},
abstract = {Any correlated equilibrium of a strategic game with bounded payoffs and convex strategy sets which has a smooth concave potential, is a mixture of pure strategy Nash equilibrium. If moreover, the strategy sets are compact and the potential is strictly concave, then the game has a unique correlated equilibrium.},
author = {Neyman, Abraham}
}
@booklet {sorin-eirgoiitdsc1995,
title = {Equilibria in Repeated Games of Incomplete Information: The Deterministic Symmetric Case},
journal = {Discussion Papers},
number = {82},
year = {1995},
month = {7},
publisher = {In T. Parthasaraty Et Al. (eds.) Game-Theoretic Applications to Economics and Operations Research ( ) Kluwer Academic Press},
abstract = {Every two person game of incomplete information in which the information to both players is identical and deterministic has an equilibrium.},
author = {Neyman, Abraham and Sorin, Sylvain}
}
@booklet {sorin-eirgoiitgsc1995,
title = {Equilibria in Repeated Games of Incomplete Information: The General Symmetric Case},
journal = {Discussion Papers},
number = {86},
year = {1995},
month = {11},
publisher = {International Journal of Game Theory 27 (1998), 201-210.},
abstract = {Every two person repeated game of symmetric incomplete information in which the signals sent at each stage to both players are identical and generated by a state and moves dependent probability distribution on a given finite alphabet has an equilibrium payoff.},
url = {/files/dp86.pdf},
author = {Abraham Neyman and Sylvain Sorin}
}
@booklet {amnonrapoport-epilgmeg1995,
title = {Equilibrium Play in Large Group Market Entry Games},
journal = {Discussion Papers},
number = {73},
year = {1995},
month = {3},
publisher = {Management Science 44 (1998), 119-141},
abstract = {Coordination behavior is studied experimentally in a class of market entry games featuring symmetric players, complete information, zero entry costs, and several randomly presented values of the market capacity. Once the market capacity, c, becomes common knowledge, each player must decide privately whether to enter the market and receive a payoff which increases in the difference between c and the number of entrants, m, or stay out. Payoffs for staying out are either positive, giving rise to the domain of gains, or negative, giving rise to the domain of losses. The major findings are substantial individual differences in decision policies, which do not diminish with practice, and aggregate group behavior which is organized extremely well in both the domains of gains and losses by the Nash equilibrium solution.},
author = {Amnon Rapoport and Darryl A. Seale and James A. Sundali}
}
@booklet {neyman-frgwfa1995,
title = {Finitely Repeated Games with Finite Automata},
journal = {Discussion Papers},
number = {69},
year = {1995},
month = {6},
publisher = {(revised in dp $\#$161)},
abstract = {The paper studies the implication of bounding the complexity of the strategies players may select on the set of equilibrium payoffs in repeated games. The complexity of a strategy is measured by the size of the minimal automaton that can implement it. A finite automaton is an automated machine that implements a strategy; it has a finite number of states and an initial state. It prescribes the action to be taken as a function of the current state and a transition function changing the states of the automaton as a function of its current state and the present actions of the other players. The size of an automaton is its number of states. The Main results imply in particular that in two person repeated games, the equilibrium payoffs of a sequence of such games, G(n)},
url = {/files/dp69.pdf},
author = {Abraham Neyman}
}
@booklet {yaacovzbergman-gpoop1995,
title = {General Properties of Option Prices},
journal = {Discussion Papers},
number = {77},
year = {1995},
month = {5},
publisher = {Journal of Finance 51 (1996), 1573-1610},
abstract = {This article establishes that, in a one-dimensional diffusion world, any contingent claim{\textquoteright}s delta is bounded by its delta at maturity and, if its payoff is convex, its current value is convex in the underlying{\textquoteright}s value. A decline in the present value of the exercise price can be associated with a decline in a call{\textquoteright}s price. Bounds on call prices and deltas are derived for the case when the underlying{\textquoteright}s volatility is bounded. If the underlying follows a multi-dimensional diffusion (a stochastic volatility world), or a discontinuous or non-Markovian process, call prices can be decreasing, concave function of the underlying{\textquoteright}s value.},
url = {/files/dp77.pdf},
author = {Yaacov Z. Bergman and Bruce D. Grundy and Zvi Wiener}
}
@booklet{feinberg-aicsfavgcbs1995,
  author    = {Yossi Feinberg},
  title     = {An Incomplete Cooperation Structure for a Voting Game Can Be Stable},
  journal   = {Discussion Papers},
  number    = {85},
  year      = {1995},
  month     = {11},
  publisher = {Games and Economic Behavior 24 (1998), 2-9},
  abstract  = {Aumann and Myerson (1988) defined a linking game leading to the formation of cooperation structures. They asked whether it is possible for a simple game to have a stable structure in which no coalition forms, i.e., in which the cooperation graph is not internally complete. We answer this question affirmatively; specifically, we present a simple proper weighted majority game with a connected incomplete structure, and prove it to be stable.},
  url       = {/files/dp85.pdf}
}
@booklet {zamir-iigatnd1995,
title = {Incomplete Information Games and the Normal Distribution},
journal = {Discussion Papers},
number = {70},
year = {1995},
month = {2},
abstract = {We consider a repeated two-person zero-sum game in which the payoffs in the stage game are given by a 2 \% 2 matrix. This is chosen (once) by chance, at the beginning of the game, to be either G1 or G{\texttwosuperior}, with probabilities p and 1 - p respectively. The maximizer is informed of the actual payoff matrix chosen but the minimizer is not. Denote by vn(p) the value of the n -times repeated game (with the payoff function defined as the average payoff per stage), and by v\%(p) the value of the infinitely repeated game. It is proved that vn(p)=v\%(p) + \%(p)\%(p)/\%n + \%\%1/\%n\% , where \%(p) is on appropriately scaled normal distributiondensity function evaluated at its p-quantile, and the coefficient K(p) is either 0 or the absolute value of a linear function in p.},
url = {/files/dp70.pdf},
author = {Jean-Francois Mertens and Shmuel Zamir}
}
@booklet {rudolfavenhaus-ig1995,
title = {Inspection Games},
journal = {Discussion Papers},
number = {68},
year = {1995},
month = {2},
publisher = {In R. J. Aumann \& S. Hart (eds.), Handbook of Game Theory, Vol. III, (2002) North-Holland},
abstract = {Starting with the analysis of arms control and disarmament problems in the sixties, inspection games have evolved into a special area of game theory with specific theoretical aspects, and, equally important, practical applications in various fields of human activities where inspection is mandatory. In this contribution, a survey of applications is given first. Then, the general problem of inspection is presented in a game theoretic framework as an extension of a statistical hypothesis testing problem. Using this framework, two important models are solved: material accountancy and data verification. A second important aspect of inspection games is limited inspection resources that have to be used strategically. This is presented in the context of sequential inspection games, where many mathematically challenging models have been studied. Finally, the important concept of leadership, where the inspector becomes a leader by announcing and committing himself to his strategy, is shown to apply naturally to inspection games.},
url = {/files/dp68.pdf},
author = {Rudolf Avenhaus and Bernhard von Stengel and Shmuel Zamir}
}
@booklet {aumann-ie1995,
title = {Interactive Epistemology},
journal = {Discussion Papers},
number = {67},
year = {1995},
month = {2},
publisher = {International Journal of Game Theory 28 (1999), 263-314},
abstract = {Formal Interactive Epistemology deals with the logic of knowledge and belief when there is more than one agent or "player". One is interested not only in each person{\textquoteright}s knowledge about substantive matters, but also in his knowledge about the others{\textquoteright} knowledge. These notes examine two parallel approaches to the subject. The first is the semantic approach, in which knowledge is represented by a space \% of states of the world, together with partitions \%i of \% for each player i; the atom of \%i containing a given state \% of the world represents the set of those states that i cannot distinguish from \%. The second is the syntactic approach, in which knowledge is represented by abstract formulas constructed according to certain syntactic rules. These notes examine the relation between the two approaches, and show that they are in a sense equivalent. In game theory and economics, the semantic approach has heretofore been most prevalent. A question that often arises in this connection is whether, in what sense, and why the space \% and the partitions \%i can be taken as given and commonly known by the players. An answer to this question is provided by the syntactic approach. Other topics that are taken up include various formalizations of "common knowledge", and the "Agreement Theorem" of J. Cave and M. Bacharach. The notes end with an application of these ideas to the context of probabilistic beliefs.},
author = {Robert J. Aumann}
}
@booklet {aumann-rabr1995,
title = {Rationality and Bounded Rationality},
journal = {Discussion Papers},
number = {76},
year = {1995},
month = {5},
publisher = {In S. Hart \& A. Mas-Colell (eds.) Cooperation: Game Theoretic Approaches. Berlin: Springer (1997) 219-232; Also in Frontiers of Research in Economic Theory, The Nancy L. Schwartz Memorial Lectures, 1983-1997},
abstract = {A survey of bounded rationality models and ideas in Game Theory. Topics covered include: The evolutionary approach to optimization {\textendash} and specifically to game theory {\textendash} and its implications for the idea of bounded rationality; evolutionary dynamics; "rule rationality" as opposed to "act rationality"; "trembles" and refinements in general; "crazy" perturbations; failure of common knowledge of rationality; limiting average payoff in infinitely repeated games; epsilon equilibria; players modeled as computers, finite state automata, or Turing machines; paradoxes (such as Ellsberg or Allais); laboratory experiments; and finally, an open problem.},
author = {Robert J. Aumann}
}
@booklet {maschler-trbssdaetng1995,
title = {The Reactive Bargaining Set: Structure, Dynamics and Extension to {NTU} Games},
journal = {Discussion Papers},
number = {81},
year = {1995},
month = {8},
publisher = {International Journal of Game Theory 26 (1997) 75-95.},
abstract = {The reactive bargaining set (Granot [1994]) is the set of outcomes for which no justified objection exists. Here, in a justified objection the objector first watches how the target tries to act (if he has such an option), and then reacts by making a profit and ruining the target{\textquoteright}s attempt to maintain his share. In this paper we explore properties of the reactive bargaining set, set up the system of inequalities that defines it, and construct a dynamic system in the sense of Stearns{\textquoteright} transfer scheme that leads the players to this set. We also extend the definition of the reactive bargaining set to NTU games in a way that keeps it nonempty. To shed light on its nature and its relative ease of computation, we compute the reactive bargaining set for games that played an important role in the game theory literature.},
author = {Daniel Granot and Michael Maschler}
}
@booklet{aumann-rtmay1995,
  author    = {Robert J. Aumann},
  title     = {Reply to Margalit and Yaari},
  journal   = {Discussion Papers},
  number    = {78},
  year      = {1995},
  month     = {6},
  publisher = {In K. J. Arrow, E. Colombatto, M. Perlman \& C. Schmidt (eds.), The Rational Foundations of Economic Behavior (1996), Macmillan, Basingstoke and London 106-107},
  abstract  = {A reply to Margalit and Yaari{\textquoteright}s paper "Rationality and Comprehension", in which they comment on my papers "Agreeing to disagree" [1] and "Notes on Interactive Epistemology" [2]. Inter alia, we point out that contrary to Margalit and Yaari{\textquoteright}s claim, in [1] the agents need not condition on the same events; and in [2], the state space is not assumed as analytic knowledge, but is derived. In addition, a simple resolution of the "hangman{\textquoteright}s paradox" is offered.},
  url       = {http://www.ma.huji.ac.il/~raumann/documents/replymargaliyaari.pdf}
}
@booklet {rosenthal-saws1995,
title = {Simultaneous Auctions with Synergies},
journal = {Discussion Papers},
number = {75},
year = {1995},
month = {3},
abstract = {Motivated by recent auctions of licenses for the radio-frequency spectrum, we consider situations where multiple objects are auctioned simultaneously by means of a second-price, sealed-bid auction. For some buyers, called global bidders, the value of multiple objects exceeds the sum of objects{\textquoteright} values separately. Others, called local bidders, are interested in only one object. In a simple independent private values setting, we (a) characterize an equilibrium that is symmetric among the global bidders; (b) show that the addition of bidders often leads to less aggressive bidding; and (c) compare the revenues obtained from the simultaneous auction to those from its sequential counterpart.},
url = {/files/dp75.pdf},
author = {Vijay Krishna and Robert W. Rosenthal}
}
@booklet {risse-asmofapsp1995,
title = {A Syntactic Model of Forgetting: A Partially Solved Problem},
journal = {Discussion Papers},
number = {79},
year = {1995},
month = {6},
abstract = {We look at a set \% of states of the world which are defined as maximal consistent lists of formulae formed in a language which contains a knowledge operator ki for each agent i. The states of the world induce an information partition for each agent i such that all those \% \% \% are included in the same information cell which contain the same range of knowledge for this agent (this range of knowledge we will call the agent{\textquoteright}s ken). We can then ask what it means that some agent i forgets which one of a variety of kens he has. This question can be answered easily if we use states of the world as primitives: then the answer is just to take a union over information cells. This does not make sense any more when states of the world are lists of formulae. We find a solution to this question for the case of one agent and show why the same solution cannot be used for the case of more than one agent. In an appendix, we apply results obtained before to analyze the j-operator (the knowing-whether operator). The larger context in which our question arose was to prove the Bachrach-Cave Agreement-Theorem in a model where states of the world are not primitives.},
author = {Mathias Risse}
}
@booklet {rothenstein-atpsgwno1995,
title = {A Two-Period Pollution Safeguards Game with N Operators},
journal = {Discussion Papers},
number = {74},
year = {1995},
month = {3},
abstract = {The environmental need to control the quality of the air is represented by a multi-players sequential game. One model is a two period game with n + 1 players, of which n are operators and one is an inspector. The game is analyzed via the solution concept of the strategies equilibrium (Nash equilibrium). The second model assumes that all operators are identical, i.e. the payoffs are the same to all operators. The equations that describe the Nash equilibrium, are solved analytically under this assumption, and enable us to compare games with a different number of operators (n). Numerical solutions are included. A discussion of the advantages and disadvantages of individual punishment vs. collective punishment appears in the last section. The model includes a parameter which varies from full individual punishment when the inspector raises an alarm (i.e. only the operators that acted illegally are fined), to full collective punishment (i.e. all operators are fined regardless of their actions). Numerical results are added.},
author = {Daniel Rothenstein}
}
@booklet {milchtaich-tvongafcncg1995,
title = {The Value of Nonatomic Games Arising from Certain Noncooperative Congestion Games},
journal = {Discussion Papers},
number = {87},
year = {1995},
month = {12},
publisher = {Published As: ``Social Optimality and Cooperation in Nonatomic Congestion Games{''}, Journal of Economic Theory 114 (2004), 56-87},
abstract = {For a class of nonatomic congestion games, two solution concepts, a noncooperative one and a cooperative one, are compared. Each player in the game chooses one of several common facilities. The player{\textquoteright}s payoff is the difference between the reward and the cost associated with choosing that facility. The rewards are fixed and player-specific. The costs are uniform, but variable: they strictly increase with the measure of the set of players using the facility. The noncooperative solution of the game is the (unique) Nash equilibrium outcome. The cooperative one is the Aumann-Shapley value of the cooperative game that results when players are willing to cooperate in order to minimize the total utility. Using a new result in the theory of values of nonatomic games, we derive a formula for the value. We show that there is exactly one case in which the Nash equilibrium outcome and the value always coincide: this is the case in which the costs increase logarithmically with the measure of the set of users.},
url = {/files/dp87.pdf},
author = {Igal Milchtaich}
}
@booklet {milchtaich-vmgbomwviaidvs1995,
title = {Vector Measure Games Based on Measures with Values in an Infinite Dimensional Vector Space},
journal = {Discussion Papers},
number = {89},
year = {1995},
month = {12},
publisher = {Games and Economic Behavior 24 (1998), 25-46},
abstract = {The following generalization of a theorem of Aumann and Shapley is proved: A vector measure game of the form f{\textquoteright}$^\circ$\%, where \% is a nonatomic Banach-space measure of bounded variation and f is a weakly continuously differentiable real-valued function defined on the closed convex hull of the range of \% such that f(0)=0, is in pNA. If the game is monotonic, then the conclusion holds even if at 0 f is only continuous, and not differentiable. The value of the game is given by the diagonal formula. These results are used for giving a new, relatively short, proof to the result that, under certain conditions, a market game is in pNA.},
url = {/files/dp89.pdf},
author = {Igal Milchtaich}
}
@booklet {mayabar-hillel-waprtelt1995,
title = {Why Are People Reluctant to Exchange Lottery Tickets?},
journal = {Discussion Papers},
number = {71},
year = {1995},
month = {3},
publisher = {Journal of Personality and Social Psychology 70 (1996), 17-27},
abstract = {In a series of experiments, we demonstrate that people are reluctant to exchange lottery tickets. In other words, when given a small incentive to exchange a lottery ticket with which they had just been endowed for a different one, with the same probability of winning the same prize, only about 50\% choose to do so. In contrast, when given the same incentive to exchange a pen with which they had just been endowed for another pen just like it, over 90\% choose to do so. We discuss {\textendash} and rule out {\textendash} a series of possible explanations for this effect, including: distorted subjective probabilities; fear of finding out that you gave up a winning ticket; lack of sufficient incentive (i.e., transaction cost); general confusion or "paranoia"; etc. We conclude that people will not exchange ex ante identical tokens of the same type unless the two tokens will be identical ex post as well. A lottery ticket with which one has been endowed becomes at once the status quo, or reference point, with respect to which changes are evaluated for possible gains and losses. Since losses loom larger than gains, two lottery tickets which are symmetrical before they pass into one{\textquoteright}s possession are no longer symmetrical once one of them becomes one{\textquoteright}s own.},
url = {/files/dp71.pdf},
author = {Maya Bar-Hillel and Efrat Neter}
}
@booklet {morgan-aaotwoaataa1994,
title = {An Analysis of the War of Attrition and the All-Pay Auction},
journal = {Discussion Papers},
number = {56},
year = {1994},
month = {8},
abstract = {We study the war of attrition and the all-pay auction when players{\textquoteright} signals are affiliated and symmetrically distributed. We (a) find sufficient conditions for the existence of symmetric monotonic equilibrium bidding strategies; and (b) examine the performance of these auction forms in terms of the expected revenue accruing to the seller. Under our conditions the war of attrition raises greater expected revenue than all other known sealed bid auction forms.},
url = {/files/dp56.pdf},
author = {Vijay Krishna and John Morgan}
}
@booklet {aumann-biackor1994,
title = {Backward Induction and Common Knowledge of Rationality},
journal = {Discussion Papers},
number = {59},
year = {1994},
month = {12},
publisher = {Games and Economic Behavior 8 (1995), 6-19},
abstract = {We formulate precisely and prove the proposition that if common knowledge of rationality obtains in a game of perfect information, then the backward induction outcome is reached.},
author = {Robert J. Aumann}
}
@booklet {volij-bcacfdritcobp1994,
title = {Bilateral Comparisons and Consistent Fair Division Rules in the Context of Bankruptcy Problems},
journal = {Discussion Papers},
number = {51},
year = {1994},
month = {6},
publisher = {International Journal of Game Theory 26 (1997), 11-26},
abstract = {We analyze the problem of extending a given bilateral principle of justice to a consistent n-creditor bankruptcy rule. Based on the bilateral principle, we build a family of binary relations on the set of creditors in order to make bilateral comparisons between them. We find that the possibility of extending a specific bilateral principle of justice in a consistent way is closely related to the quasi-transitivity of the binary relations mentioned above.},
author = {Nir Dagan and Oscar Volij}
}
@booklet {pitowsky-otcotpimm1994,
title = {On the Concept of the Proof in Modern Mathematics},
journal = {Discussion Papers},
number = {42},
year = {1994},
month = {3},
abstract = {This paper deals with the attempts to characterize the set of all proofs in a given mathematical domain such as geometry or number theory. The characterization usually takes the form of a finite list of axiom schemata and inference rules, which is thought to be complete. A related effort, which originated with Descartes, is to replace proofs - that is, reasoning about concepts and relations - by the solution of algebraic equations which are shown to be equivalent to the proofs. These formalist tendencies have always been opposed by intuitionists. I trace the dispute from Descartes and Leibnitz through Kant all the way to its climax in the fifty years between the demonstration of the relative consistency of hyperbolic geometry and the discovery of Godel{\textquoteright}s theorems. My purpose is both historical and philosophical. On the historical level, I argue that Hilbert{\textquoteright}s program was not only a foundationalist effort to secure the consistency of mathematics. It was, in addition, an internal mathematical program in the aforementioned cartesian tradition of replacing proofs by computations. The demise of Hilbert{\textquoteright}s philosophical pretensions brought considerable and unexpected success to the mathematical program: Godel{\textquoteright}s theorem, which shows how to replace proofs by computations in very extensive domains of mathematics, and, ultimately, the Davis-Robinson-Putnam-Matijacevic theorem, which demonstrates, roughly, that every proof in those domains is equivalent to a solution of an algebraic (i.e. polynomial) equation. The fact that the notion of proof in number theory is indefinitely extensible (by Godel{\textquoteright}s theorem) depends on a complete characterization of the concept of {\textquoteleft}computation{\textquoteright} (the Church-Turing thesis). 
On the philosophical level, I argue that this dependence undermines some contemporary intuitionist claims (by Weyl and Dummett) which are based on Godel{\textquoteright}s results.},
author = {Itamar Pitowsky}
}
@booklet{dagan-cdatwac1994,
  author   = {Nir Dagan},
  title    = {Consistency, Decentralization and the Walrasian Allocations Correspondence},
  journal  = {Discussion Papers},
  number   = {43},
  year     = {1994},
  month    = {1},
  abstract = {In this paper we study finite-agent exchange economies. We extend the classical model by adding an imports-exports vector, which defines the markets clearing conditions of the economy. Equipped with this new definition, self-consistency properties are naturally defined. We show that the Core correspondence and the Walrasian allocations correspondence are self-consistent. In addition, we present an axiomatic characterization of the Walrasian allocations correspondence for a class of convex and smooth economies. All the axioms presented in the characterization are satisfied by the Core, except for a converse-consistency property, which can be interpreted as a requirement of decentralization.}
}
@booklet {ezraeiny-cassolgaie1994,
title = {Core and Stable Sets of Large Games Arising in Economics},
journal = {Discussion Papers},
number = {58},
year = {1994},
month = {11},
publisher = {Journal of Economic Theory 68 (1996), 200-211},
abstract = {It is shown that the core of a non-atomic glove-market game which is defined as the minimum of finitely many non-atomic probability measures is a von-Neumann Morgenstern stable set. This result is used to characterize some stable set of large games which have a decreasing returns to scale property. We also study exact non-atomic glove-market games. In particular we show that in a glove-market game which consists of the minimum of finitely many mutually singular non-atomic measures, the core is a von-Neumann Morgenstern stable set if the game is exact. We also discuss the intuitive appeal of the equivalence of the core and stable set. We do this by employing the theory of social situations [5] and highlighting the negotiation processes that underlie these two notions.},
url = {/files/dp58.pdf},
author = {Ezra Einy and Ron Holzman and Benyamin Shitovitz}
}
@booklet {garybornstein-teorpitiaitg1994,
title = {The Effect of Repeated Play in the {IPG} and {IPD} Team Games},
journal = {Discussion Papers},
number = {46},
year = {1994},
month = {3},
publisher = {Journal of Conflict Resolution 38 (1994), 690-707},
abstract = {Repeated interaction in intergroup conflict was studied in the context of two team games: The Intergroup Public Goods (IPG) game and the Intergroup Prisoner{\textquoteright}s Dilemma (IPD) game. The results reveal (a) a main effect for game type; subjects were twice as likely to contribute towards their group effort in the IPG game than in the IPD game, and (b) a game-type X time interaction; subjects contributed less over time in the IPD game while continuing to contribute at about the same rate in the IPG game. The second finding supports the hypothesis that subjects learn the structure of the game and adapt their behavior accordingly, and is compatible with a simple learning model (Roth \& Erev, 1993) which assumes that choices that have led to good outcomes in the past are more likely to be repeated in the future. A reciprocal cooperation hypothesis which assumes that players make their choices contingent on the earlier choices of the other players received little support.},
url = {/files/dp46.pdf},
author = {Gary Bornstein and Ido Erev and Harel Goren}
}
@booklet {brandenburger-ecfne1994,
title = {Epistemic Conditions for Nash Equilibrium},
journal = {Discussion Papers},
number = {57},
year = {1994},
month = {10},
publisher = {Econometrica 63 (1995), 1161-1180},
abstract = {Sufficient conditions for Nash equilibrium in an n-person game are given in terms of what the players know and believe - about the game, and about each other{\textquoteright}s rationality, actions, knowledge, and beliefs. Mixed strategies are treated not as conscious randomizations, but as conjectures, on the part of other players, as to what a player will do. Common knowledge plays a smaller role in characterizing Nash equilibrium than had been supposed. When n = 2, mutual knowledge of the payoff functions, of rationality, and of the conjectures implies that the conjectures form a Nash equilibrium. When n \% 3 and there is a common prior, mutual knowledge of the payoff functions and of rationality, and common knowledge of the conjectures, imply that the conjectures form a Nash equilibrium. Examples show the results to be tight.},
url = {/files/dp57.pdf},
author = {Robert J. Aumann and Adam Brandenburger}
}
@booklet {neyman-aepfpce1994,
title = {An Equivalence Principle for Perfectly Competitive Economies},
journal = {Discussion Papers},
number = {47},
year = {1994},
month = {5},
publisher = {Journal of Economic Theory 75 (1997), 314-344},
abstract = {It is a striking fact that different solutions become equivalent in the setting of perfectly competitive economies. We provide an axiomatic approach to this equivalence phenomenon. A solution is viewed as a correspondence which maps each economy to a subset of its individually rational and Pareto-optimal allocations. Four axioms are placed on the correspondence: anonymity, equity, consistency and restricted continuity. It is shown that the axioms categorically determine the Walrasian correspondence. The equivalence of other solutions, such as the core or value allocations, now follows by checking that they too satisfy the axioms.},
url = {/files/dp47.pdf},
author = {Pradeep Dubey and Abraham Neyman}
}
@booklet {margalit-teosb1994,
title = {The Ethics of Second-Order Beliefs},
journal = {Discussion Papers},
number = {41},
year = {1994},
month = {3},
abstract = {The questions I address in my paper are: Are people morally responsible for their beliefs? Are people{\textquoteright}s beliefs voluntary - can they be chosen and decided upon? What is the nature of the analogy between the obligation of belief and the obligation to have certain emotions and not others? Can the skeptic suspend his belief? And finally, what is the sin of the heretic? I argue that it is right and proper for beliefs to be evaluated morally even if they are not voluntary and therefore not under our control. The key to my view is the fact that human beings possess second-order as well as first-order beliefs.},
author = {Avishai Margalit}
}
@booklet{agastya-aebmodp1994,
  author   = {Murali Agastya},
  title    = {An Evolutionary Bargaining Model (revision of Discussion Paper $\#$38)},
  journal  = {Discussion Papers},
  number   = {61},
  year     = {1994},
  month    = {12},
  abstract = {A non-negative function f defined on the class of subsets of a finite set of factors of production describes the production possibilities at each date. The problem of allocating the surplus among the factors is studied in a dynamic learning model. Representatives for the factors (called players) make wage demands naively based on precedent and ignorant of each others{\textquoteright} utilities for this good. A global convergence result shows that players learn to reach some (and only a) core allocation in the long run. If players make mistakes however, only a strict subset of the core allocations are likely, i.e., stochastically stable. The main result shows that in the limit, these stable allocations for a particular set of players, converge to the allocation that maximizes the product of all the players{\textquoteright} utilities over core allocations.},
  url      = {/files/dp61.pdf}
}
@booklet{feinberg-esoae1994,
  author   = {Yossi Feinberg},
  title    = {Evolutionary Selection of an Equilibrium},
  journal  = {Discussion Papers},
  number   = {39},
  year     = {1994},
  month    = {1},
  abstract = {We analyze the long-run behavior of a population engaged in a 2x2 evolutionary game undergoing mutation effects. We assume that the rates of mutation are exogenously and randomly determined. It is shown that if high mutation rates are possible but highly improbable, then the population evolves towards the risk dominant equilibrium (Harsanyi and Selten, 1988).}
}
@booklet {ma-irrmwii1994,
title = {Infinitely Repeated Rental Model with Incomplete Information},
journal = {Discussion Papers},
number = {54},
year = {1994},
month = {6},
publisher = {Economics Letters 49 (1995), 261-266.},
abstract = {In an infinitely repeated rental model with two types of buyer and no discounting, the set of all Nash equilibrium payoffs for the seller and the buyer is characterized.},
url = {/files/dp54.pdf},
author = {Jinpeng Ma}
}
@booklet {danielgranot-tkoastg1994,
title = {Kernel/Nucleolus of a Standard Tree Game, The},
journal = {Discussion Papers},
number = {45},
year = {1994},
month = {3},
publisher = {International Journal of Game Theory 25 (1996), 219-244},
abstract = {In this paper we characterize the nucleolus (which coincides with the kernel) of a tree enterprise. We also provide a new algorithm to compute it, which sheds light on its structure. We show that in particular cases, including a chain enterprise one can compute the nucleolus in O(n) operations, where n is the number of vertices in the tree.},
author = {Daniel Granot and Michael Maschler and Guillermo Owen and Weiping R. Zhu}
}
@booklet {dagan-otlspit1994,
title = {On the Least Sacrifice Principle in Taxation},
journal = {Discussion Papers},
number = {50},
year = {1994},
month = {6},
abstract = {Utilitarian philosophers and economists recommended that when applying taxation programs, government should minimize the sum total of sacrifice made by individuals. This paper presents a model and an axiom system of taxation policies, in which the Least Sacrifice Principle is derived. A key axiom in our characterization is self-consistency. Other relations between self-consistency and welfare maximization in our model and in other models are also discussed.},
author = {Nir Dagan}
}
@booklet {winter-anasecfpbi1994,
title = {A Necessary and Sufficient Epistemic Condition for Playing Backward Induction},
journal = {Discussion Papers},
number = {48},
year = {1994},
month = {6},
publisher = {Journal of Mathematical Economics 27 (1997), 325-345},
abstract = {In an epistemic framework due to Aumann we characterize the minimal condition on the rationality of the players that implies backward induction in perfect information games in agent form. This condition requires each player to know that the players are rational at later, but not at previous decision nodes.},
url = {/files/dp48.pdf},
author = {Dieter Balkenborg and Eyal Winter}
}
@booklet {dagan-ncoobr1994,
title = {New Characterizations of Old Bankruptcy Rules},
journal = {Discussion Papers},
number = {44},
year = {1994},
month = {1},
publisher = {Social Choice and Welfare 13 (1996), 51-59},
abstract = {This paper presents axiomatic characterizations of two bankruptcy rules discussed in Jewish legal literature: the Constrained Equal Awards rule and the Contested Garment principle (the latter is defined only for two-creditor problems). A major property in these characterizations is independence of irrelevant claims, which requires that if an individual claim exceeds the total to be allocated it should be considered irrelevant.},
author = {Nir Dagan}
}
@booklet {orshan-np1994,
title = {Non-Symmetric Prekernels},
journal = {Discussion Papers},
number = {60},
year = {1994},
month = {12},
abstract = {A "symmetry" property, either in the version of "equal treatment" or in the version of "anonymity", is one of the standard intuitively acceptable properties satisfied by most well known solution concepts in game theory. However, there are many instances where symmetry is counterintuitive. This paper analyzes non-symmetric prekernels: solution concepts that satisfy Peleg{\textquoteright}s axioms for the prekernel [1986, 1987], with equal treatment replaced by the requirement that the solution of each 2-person game consists of a unique point. It is shown that non-symmetric prekernels do exist and then a full characterization is provided.},
author = {Gonni Orshan}
}
@booklet {agastya-ootsv1994,
title = {Ordinality of the Shapley Value},
journal = {Discussion Papers},
number = {62},
year = {1994},
month = {12},
abstract = {In Roth (1977) it is argued that the Shapley value is the cardinal utility of playing a game and it inherits properties used to define the underlying game itself. Implicit in this statement is the assumption that the TU game is generated by allowing for lotteries over an underlying set of alternatives. However, often there is a single numeraire good that can generate a game. In such instances, it is unclear why the utility of playing a game is cardinal when the preferences for the underlying good are ordinal. This paper presents a framework in which the Shapley value emerges as the representation of a preference ordering over a set of games. This representation is unique only up to positive monotone transformations, thereby establishing the ordinality of the value.},
author = {Murali Agastya}
}
@booklet {monderer-pawvong1994,
title = {Potentials and Weighted Values of Non-Atomic Games},
journal = {Discussion Papers},
number = {53},
year = {1994},
month = {8},
publisher = {Mathematics of Operations Research 22 (1997), 619-630},
abstract = {The "potential approach" to value theory for finite games was introduced by Hart and Mas-Colell (1989). Here this approach is extended to non-atomic games. On appropriate spaces of differentiable games there is a unique potential operator, that generates the Aumann and Shapley (1974) value. As a corollary we obtain the uniqueness of the Aumann - Shapley value on certain subspaces of games. Next, the potential approach is applied to the weighted case, leading to "weighted non-atomic values". It is further shown that the asymptotic weighted value is well-defined, and that it coincides with the weighted value generated by the potential.},
url = {/files/pot-w.html},
author = {Sergiu Hart and Dov Monderer}
}
@booklet {avishaimargalit-rac1994,
title = {Rationality and Comprehension},
journal = {Discussion Papers},
number = {40},
year = {1994},
month = {2},
publisher = {In K. J. Arrow, E. Colombatto, M. Perlman \& C. Schmidt (eds.), The Rational Foundations of Economic Behavior, (1996) Macmillan, Basingstoke and London, 89-100},
abstract = {Devising a theory of knowledge for interacting agents has been on many people{\textquoteright}s minds recently. A near consensus has emerged, that the appropriate framework is a multi-agent version of C.I. Lewis{\textquoteright}s system S5 or one of S5{\textquoteright}s standard weakenings. In this essay, it is argued that such a framework cannot possibly be adequate, if it is to capture the intricacies of genuine inter-agent epistemics. Introducing a notion of "comprehension" {\textendash} knowledge which is non-sensory yet non-analytic {\textendash} may possibly be a remedy.},
url = {/files/dp40.pdf},
author = {Avishai Margalit and Menahem E. Yaari}
}
@booklet {ma-smareiatmm1994,
title = {Stable Matchings and Rematching-Proof Equilibria in a Two-Sided Matching Market},
journal = {Discussion Papers},
number = {55},
year = {1994},
month = {6},
publisher = {Journal of Economic Theory 66 (1995), 352-369},
abstract = {In this paper we introduce the notion of a rematching-proof equilibrium for a two-sided matching market to resolve Roth{\textquoteright}s open question: What kind of equilibria of the game induced by any stable mechanism with respect to misreported profiles produce matchings that are stable with respect to the true profile. We show that the outcome of a rematching-proof equilibrium is stable with respect to the true profile even though the equilibrium profile may contain misreported preferences. We show that a rematching-proof equilibrium exists. Moreover, we extend these two results to the strong equilibria. Furthermore, the Nash equilibria in Roth [11] are shown to be rematching-proof equilibria. The relation between the rematching-proof equilibria and the strong equilibria is discussed as well.},
url = {/files/dp55.pdf},
author = {Jinpeng Ma}
}
@booklet {balkenborg-saes1994,
title = {Strictness and Evolutionary Stability},
journal = {Discussion Papers},
number = {52},
year = {1994},
month = {7},
abstract = {The notion of a strict equilibrium set is introduced as a natural extension of the notion of a strict equilibrium point. The evolutionary stable sets of a truly asymmetric contest are shown to be behaviorally equivalent to the strict equilibrium sets of an "agent representation" of the contest. Using variants of the replicator dynamic we provide dynamic characterizations of strict equilibrium sets. We do this both for truly asymmetric contests and for arbitrary normal form games modelling conflicts between several distinct species.},
url = {/files/dp52.pdf},
author = {Dieter Balkenborg}
}
@booklet {perry-viibi1994,
title = {Virtual Implementation in Backwards Induction},
journal = {Discussion Papers},
number = {64},
year = {1994},
month = {12},
publisher = {Games and Economic Behavior 15 (1996), 27-32},
abstract = {We examine a sequential mechanism which is a simple modification of the normal form mechanism introduced by Abreu and Matsushima (1992). We show that almost any social choice function can be virtually implemented via a finite sequential game of perfect information. The solution concept assumed is Subgame Perfect Equilibrium or Iterative Elimination of Strictly Dominated Strategies. In particular, any social choice function that is virtually implementable via the Abreu-Matsushima{\textquoteright}s mechanism is also virtually implementable by a sequential mechanism.},
url = {/files/dp64.pdf},
author = {Jacob Glazer and Motty Perry}
}
@booklet {winter-vav1994,
title = {Voting and Vetoing},
journal = {Discussion Papers},
number = {49},
year = {1994},
month = {6},
publisher = {American Political Science Review 90 (1996), 813-823},
abstract = {The consequences of veto power in committees are analyzed using the approach of non-cooperative bargaining theory. It is first shown that in equilibrium non-veto players do not share in the benefits gained by the decision making of the committee, i.e., in every equilibrium outcome of the bargaining game non-veto players earn zero. Some measures for reducing the excessive power of veto members in committees are analyzed. Specifically, we study the effects of imposing a deadline on negotiations and of expanding the committee by increasing the number of non-veto players.},
url = {/files/dp49.pdf},
author = {Winter, Eyal}
}
@booklet {frankthuijsman-amafbob1993,
title = {Automata, Matching and Foraging Behavior of Bees},
journal = {Discussion Papers},
number = {30},
year = {1993},
month = {8},
publisher = {Journal of Theoretical Biology 175 (1995), 305-316},
abstract = {In this paper we discuss two types of foraging strategies for bees. Each of these explicit strategies explains that in the environment of a monomorphic bee community the bees will distribute themselves over the available homogeneous nectar sources according to the Ideal Free Distribution. At the same time these strategies explain that in single-bee experimental settings a bee will match, by its number of visits, the nectar supply from the available sources (the Matching Law). Moreover, both strategies explain that in certain situations the bees may behave as if they are risk averse, i.e spend more time on the flower type with the lower variance in nectar supply.},
url = {/files/dp30.pdf},
author = {Frank Thuijsman and Bezalel Peleg and Mor Amitai and Avi Shmida}
}
@booklet {bicchieri-cbcaer1993,
title = {Counterfactuals, Belief Changes, and Equilibrium Refinements},
journal = {Discussion Papers},
number = {32},
year = {1993},
month = {9},
publisher = {Philosophical Topics 21 (1993), 21-52},
abstract = {The literature on Nash equilibrium refinements provides several ways to check the stability of a Nash equilibrium against deviations from equilibrium play. Stability, however, is a function of how a deviation is being interpreted. An equilibrium that is stable under one interpretation may cease to be stable under another, but the refinement literature provides no general criterion to judge the plausibility of different interpretations of off-equilibrium play. This paper specifies a model of belief revision that minimizes the loss of useful information. When several interpretations are compatible with off-equilibrium play, the one that requires the least costly belief revision (in terms of informational value) will be chosen by the players. This model of belief revision generates a plausibility ranking of interpretations of deviations, hence it also provides a ranking of Nash equilibrium refinements.},
author = {Cristina Bicchieri}
}
@booklet {harel-eaficltcfacldocf1993,
title = {Efficiency and Fairness in Criminal Law: The Case for a Criminal Law Doctrine of Comparative Fault},
journal = {Discussion Papers},
number = {28},
year = {1993},
month = {6},
publisher = {California Law Review 82 (1994), 1181-1222},
abstract = {Criminal law is traditionally described as directing its injunctions exclusively to actual or potential criminals. This article will argue that the traditional view is normatively unjustified both on efficiency and fairness grounds. To disregard the victim{\textquoteright}s conduct in determining the sanctions of criminals is both inefficient and unfair. It is inefficient because dismissing the behavior of the victim as irrelevant to the concerns of the criminal justice system does not provide optimal incentives for victims to take precautions against crime. It is unfair to disregard the victim{\textquoteright}s conduct because given the greater likelihood that careless potential victims (relative to cautious ones) will become actual victims of crime, the expected costs of protecting careless victims are higher than the expected costs of protecting cautious ones. Hence, under the current system, cautious victims are exploited for the sake of protecting careless ones. Both efficiency and fairness considerations suggest that criminal law should adopt a criminal law doctrine of comparative fault, under which criminals who act against careless victims would be exculpated or their punishment mitigated.},
author = {Harel, Alon}
}
@booklet {budescu-tewte1993,
title = {Elusive Wishful Thinking Effect, The},
journal = {Discussion Papers},
number = {29},
year = {1993},
month = {7},
publisher = {Thinking and Reasoning 1 (1995), 71-104},
abstract = {We define a desirability effect as the inflation of the judged probability of desirable events and the diminution of the judged probability of undesirable events. A series of studies designed to detect this effect is reported. In the first four experiments, subjects were presented with visual stimuli (a grid matrix in two colors, or a jar containing beads in two colors), and asked to estimate the probability of drawing at random one of the colors. The estimated probabilities for a defined draw were not higher when the draw entailed a gain than when it entailed a loss. In the fifth and sixth experiment, subjects read short stories each describing two contestants competing for some desirable outcome (e.g., firms competing for a contract). Some judged the probability that A would win, others judged the desirability that A would win. Story elements which enhanced a contestant{\textquoteright}s desirability without having normative bearing on its winning probability did not cause the favored contestant to be judged more likely to win. Only when a contestant{\textquoteright}s desirability was enhanced by promising the subject a monetary prize contingent on that contestant{\textquoteright}s win was there some slight evidence for a desirability effect: contestants were judged more likely to win when the subject expected a prize if they won than when the subject expected a prize if the other contestant won. In the last experiment, subjects estimated the probability of an over-20 point weekly change in the Dow Jones average, and were promised monetary prizes contingent on such a change either occurring, or failing to occur. They were also given a monetary incentive for accuracy. Subjects who desired a large change did not judge it more likely to occur than subjects who desired a small change. 
We discuss the difficulty of obtaining a desirability effect on probabilities, and argue that apparently wishful thinking {\textendash} in the form of optimistic cognitions {\textendash} can occur without affecting the evaluation of evidence.},
url = {/files/dp_29.pdf},
author = {Maya Bar-Hillel and David Budescu}
}
@booklet {agastya-aebm1993,
title = {An Evolutionary Bargaining Model},
journal = {Discussion Papers},
number = {38},
year = {1993},
month = {12},
publisher = {(revised in Discussion Paper $\#$61)},
abstract = {Varying quantities of a single good can be produced using at least two and at most n factors of production. The problem of allocating the surplus is studied in a dynamic model with adaptive behavior. Representatives for the factors (referred to as players) make wage demands based on precedent and ignorant of each others utilities for this good. Necessary and sufficient conditions are provided under which the long-run equilibria coincide with the core allocations. Moreover, allowing for the possibility of mistakes by the players, it is shown that the unique limiting stochastically stable outcome maximizes the product of the players{\textquoteright} utilities subject to being in the core of the technology.},
author = {Murali Agastya}
}
@booklet {antonelli-gaflrabk1993,
title = {Game-Theoretic Axioms for Local Rationality and Bounded Knowledge},
journal = {Discussion Papers},
number = {31},
year = {1993},
month = {9},
publisher = {Journal of Logic, Language and Information 4 (1995), 1-23},
abstract = {We present an axiomatic approach for a class of finite, extensive form games of perfect information that makes use of notions like "rationality at a node" and "knowledge at a node". We show that, in general, a theory that is sufficient to infer an equilibrium must be modular: for each subgame G{\textquoteright} of a game G the theory of game G must contain just enough information about the subgame G{\textquoteright} to infer an equilibrium for G{\textquoteright}. This means, in general, that the level of knowledge relative to any subgame of G must not be the same as the level of knowledge relative to the original game G. We show that whenever the theory of the game is the same at each node, a deviation from equilibrium play forces a revision of the theory at later nodes. On the contrary, whenever a theory of the game is modular, a deviation from equilibrium play does not cause any revision of the theory of the game.},
author = {Cristina Bicchieri and Gian Aldo Antonelli}
}
@booklet {zamir-gaoma1993,
title = {Game-Theoretical Analysis of Material Accountancy},
journal = {Discussion Papers},
number = {34},
year = {1993},
month = {9},
abstract = {Game theoretical models and analysis are provided for the sequential material accountancy problem. We model the n-period problem as a general sequential game played between the Operator and the Inspector. The game is analyzed through the solution concept of (Nash) equilibrium. We study several versions of the game corresponding to various assumptions on the payoffs and the strategy sets. The first model solved is what we refer to as the static game. This is a game in which detection time is unimportant and the operator has to decide about his diversion plan at the beginning of the game (and he cannot deviate from it in a later stage). The solution of this game is obtained by its decomposition into two simpler games: a zero-sum game which determines the diversion plan and the statistical test (which turned out to be the CUMUF test) and a second, non zero-sum game which determines the diversion probability and the false alarm probability. Next we return to the sequential game and prove that under the assumptions underlying the statistical analysis, the CUMUF test emerges as part of the solution of the game, i.e., as the inspector{\textquoteright}s strategy in equilibrium. Then we consider a {\textquoteleft}really sequential{\textquoteright} game in which early detection is important and in which the operator can retreat (in view of high observed intermediate MUF) from completing a diversion plan that he has started. We find the structure of the equilibrium and the equilibrium equations of this game. These equations turn out to be too complex to be solved analytically, hence we provide numerical solutions which give interesting insight into the problem.},
url = {/files/dp34.pdf},
author = {Rudolf Avenhaus and Shmuel Zamir}
}
@booklet {mas-colell-hvolentce1993,
title = {Harsanyi Values of Large Economies: Non-Equivalence to Competitive Equilibria},
journal = {Discussion Papers},
number = {25},
year = {1993},
month = {2},
publisher = {Games and Economic Behavior 13 (1996), 74-99},
abstract = {We consider the relations between the competitive equilibria in economies with many agents and the value allocations of the resulting coalition games. In particular, we provide a (smooth and robust) example where the "value principle" does not hold for the Harsanyi NTU-value: there is a unique competitive equilibrium, which however does not belong to the (non-empty) set of Harsanyi value allocations.},
url = {/files/harsa.html},
author = {Sergiu Hart and Andreu Mas-Colell}
}
@booklet {sergiuhart-wtatcoss1993,
title = {{\textquoteleft}Knowing Whether{\textquoteright}, {\textquoteleft}Knowing That{\textquoteright}, and the Cardinality of State Spaces},
journal = {Discussion Papers},
number = {36},
year = {1993},
month = {10},
publisher = {Journal of Economic Theory 70 (1996), 249-256},
abstract = {We introduce a new operator on information structures which we call {\textquoteleft}Knowing whether{\textquoteright} as opposed to the standard knowledge operator which may be called {\textquoteleft}Knowing that{\textquoteright}. The difference between these operators is simple. Saying that an agent knows that a certain event occurred implies that this event indeed occurred, while saying that the agent knows whether an event occurred does not imply that the event occurred. (Formally, knowing whether X means that either it is known that X occurred or it is known that X did not occur). We show that iterating {\textquoteleft}Knowing whether{\textquoteright} operators of different agents has a remarkable property that iterations of {\textquoteleft}knowing that{\textquoteright} do not have. When we generate a sequence of events, starting with a given event and then applying {\textquoteleft}Knowing that{\textquoteright} or {\textquoteleft}not knowing that{\textquoteright} to the previous event, then the events in this sequence may be, somewhat surprisingly, contradictory. In contrast, any sequence of this type, generated with {\textquoteleft}knowing whether{\textquoteright} and {\textquoteleft}not knowing whether{\textquoteright} is never contradictory. We use this property of the {\textquoteleft}knowing whether{\textquoteright} operator to construct a simple and natural state space and information structures for two agents, such that: (1) any two states are distinct relative to some interactive knowledge of a fixed event, (2) the space has the cardinality of the continuum. This result - originally proved in a complicated manner by Aumann (1989) - demonstrates the usefulness of the {\textquoteleft}knowing whether{\textquoteright} operator.},
author = {Sergiu Hart and Aviad Heifetz and Dov Samet}
}
@booklet {zamir-lcwcacrasa1993,
title = {Loan Contracts with Collateral and Credit Rationing: A Signaling Approach},
journal = {Discussion Papers},
number = {26},
year = {1993},
month = {6},
abstract = {Loan contracts with collateral are a common instrument to allocate credit to entrepreneurs who invest in risky projects. Collateral provides lenders with partial insurance against bad outcomes. Since typically, credit is provided under incomplete information about the nature of the project to be undertaken, we investigate in this paper whether collateral can be used as an instrument to identify projects which the bank may consider to be {\textquoteright}bad{\textquoteright}. We prove that the ability of collateral to serve as a screening device depends on whether the entrepreneurs and the bank have similar ranking of project quality. If they do not, collateral is less effective as a signal. Within each regime, we identify conditions under which separating and pooling equilibria take place and we characterize the properties of these equilibria. When credit is scarce, we derive equilibria in which credit is rationed. A rationing regime eliminates pooling equilibria and generates more surplus to the bank on every project for which a loan was granted.},
url = {/files/db26.pdf},
author = {Michael Landsberger and Shmuel Zamir}
}
@booklet {nirdagan-anvocbr1993,
title = {A Noncooperative View of Consistent Bankruptcy Rules},
journal = {Discussion Papers},
number = {37},
year = {1993},
month = {12},
publisher = {Games and Economic Behavior 18 (1997), 55-72},
abstract = {We introduce a game form that captures a non-cooperative dimension of the consistency property of bankruptcy rules. Any consistent and monotone rule is fully characterized by a bilateral principle and consistency. Like the consistency axiom, our game form, together with a bilateral principle, yields the respective consistent bankruptcy rule as a result of a unique outcome of subgame perfect equilibria. The result holds for a large class of consistent and monotone rules, including the Constrained Equal Award, the Proportional and many other well-known rules. Moreover, for a large class of rules, all the subgame perfect equilibria are coalition-proof.},
url = {/files/dp37.pdf},
author = {Nir Dagan and Roberto Serrano and Oscar Volij}
}
@booklet {bergman-opwdiraba1993,
title = {Option Pricing with Differential Interest Rates: Arbitrage-Bands Beget Arbitrage-Ovals},
journal = {Discussion Papers},
number = {35},
year = {1993},
month = {9},
publisher = {Review of Financial Studies 8 (1995), 475-500},
abstract = {The classic Option Pricing Model is generalized to a more realistic, imperfect, dynamically incomplete capital market with different interest-rates for borrowing and for lending and a return differential between long and short positions in stock. It is found that in the absence of arbitrage opportunities, the equilibrium price of any contingent claim, or of a portfolio of such claims, must lie within an arbitrage-band. The boundaries of an arbitrage-band are computed as solutions to a quasi-linear partial-differential-equation, and, in general, each end-point of such a band depends on both interest-rates for borrowing and for lending. This, in turn, implies that the vector of concurrent equilibrium prices of different contingent-claims - even claims that are written on different underlying assets - must lie within a computable oval in the price space.},
url = {/files/dp35.pdf},
author = {Yaacov Z. Bergman}
}
@booklet {philipjreny-tpcoagwsp1993,
title = {Partnered Core of a Game with Side Payments, The},
journal = {Discussion Papers},
number = {33},
year = {1993},
month = {9},
abstract = {We introduce the notion of the partnered core of a game. A payoff is partnered if there are no asymmetric dependencies between any two players. A payoff is in the partnered core of a game if it is partnered, feasible and cannot be improved upon by any coalition of players. We show that the relative interior of the core of a game with side payments is contained in the partnered core. For quasi-strictly convex games the partnered core coincides with the relative interior of the core. When there are no more than three partnerships, the sums of the payoffs to partnerships are constant across all core payoffs. When there are no more than three players, the partnered core satisfies additional properties.},
author = {Philip J. Reny and Eyal Winter and Myrna Holtz Wooders}
}
@booklet {volij-rwtra1993,
title = {Rationality Without the Reduction Axiom},
journal = {Discussion Papers},
number = {24},
year = {1993},
month = {1},
abstract = {Two results concerning the relation between rationality and equilibrium concepts in normal form games are generalized for the case where players do not satisfy the reduction of compound lotteries axiom. The crucial axiom of expected utility theory is the independence axiom which itself is a combination of two axioms: the compound independence axiom and the reduction of compound lotteries axiom. This paper is an effort to extend game theory to non-expected utility preferences. It generalizes the results of Aumann (1987) and Aumann and Brandenburger (1991) to games with players who do not satisfy the reduction of compound lotteries axiom. We show that the results of the above authors do not depend on the specific definition of rationality applied by them.},
author = {Volij, Oscar}
}
@booklet {hurwitz-tgamoic1993,
title = {Team Games as Models of Intergroup Conflicts},
journal = {Discussion Papers},
number = {27},
year = {1993},
month = {6},
abstract = {The internal problem of collective action that arises when groups, as opposed to individuals, are in conflict cannot be studied in the context of two-person games that treat the competing groups as unitary players. Traditional N-person games are also too restrictive for this purpose, since they ignore the conflict of interests between the groups. Because the intergroup conflict motivates the need for intragroup collective action, and the groups{\textquoteright} respective success in mobilizing collective action determines the outcome of the intergroup competition, the intergroup and intragroup levels should be considered simultaneously. This paper: (a) proposes to model intergroup conflicts as team games (Palfrey \& Rosenthal, 1983); (b) offers an initial taxonomy for this class of games; and (c) illustrates some applications for strategic analyses of intergroup conflict and political interactions.},
author = {Gary Bornstein and Roger Hurwitz}
}
@booklet {volij-tbpacba1992,
title = {Bankruptcy Problem: A Cooperative Bargaining Approach, The},
journal = {Discussion Papers},
number = {16},
year = {1992},
month = {11},
publisher = {Mathematical Social Sciences 26 (1993), 287-297},
abstract = {We associate each bankruptcy problem with a bargaining problem and derive old and new allocation rules for the former by applying well known bargaining solutions to the latter.},
author = {Nir Dagan and Oscar Volij}
}
@booklet {winter-bic1992,
title = {Bargaining in Committees},
journal = {Discussion Papers},
number = {22},
year = {1992},
month = {12},
publisher = {Published as {\textquotedblleft}Negotiations in Multi-Issue Committees{\textquotedblright}, Journal of Public Economics 65 (1997), 323-342},
abstract = {We propose a non-cooperative treatment to the problem of collective decision making within committees, by modelling this process as a sequential bargaining game. We show that stationary subgame perfect equilibria of this bargaining game fully implement the core of the corresponding committee problem. We also discuss the inefficiency of non-stationary (subgame perfect) equilibria, and shortly refer to the problem of manipulability. Based on these results we then consider multi-issue committees, and address the problem of constructing agendas. In particular we will argue in favor of agendas where the important issues are discussed first.},
author = {Winter, Eyal}
}
@booklet {bergman-bnffabt1992,
title = {Bayesian Non-Cooperative Foundations for Axiomatic Bargaining Theories},
journal = {Discussion Papers},
number = {12},
year = {1992},
month = {9},
abstract = {In the first part, Rubinstein{\textquoteright}s two-person, complete information, alternating-offers bargaining model is extended to that of a fairly general contested pie which accommodates non-stationary preferences and physical joint payoffs with non-stationary constraints and outside options. The generalization in discrete-time and its continuous-time limit as bargaining rounds shorten are designed to bring the non-cooperative alternating-offers bargaining model to a form whose predictions can be compared and contrasted with those of the various axiomatic bargaining theories. This is done in the second part, where a bayesian approach is developed, which is used to optimally predict bargaining outcomes in game situations where full information about the bargaining procedure is lacking. This methodology gives rise to bayesian bargaining solution-functions, that generalize axiomatic bargaining solution-functions, thus setting the axiomatic theories on non-cooperative foundations.},
author = {Yaacov Z. Bergman}
}
@booklet {peleg-cce1992,
title = {Coalition-Proof Communication Equilibria},
journal = {Discussion Papers},
number = {9},
year = {1992},
month = {7},
publisher = {Social Choice Welfare and Ethics, W.A. Barnet, H. Moulin, M. Salles \& N.J. Schofield (eds.), Cambridge University Press (1995), 289-300},
abstract = {We offer a definition of coalition-proof communication equilibria. The use of games of incomplete information is essential to our approach. Deviations of coalitions are introduced after their players are informed of the actions they should follow. Therefore, improvements by coalitions on a given correlated strategy should always be made when their players have private information. Coalition-proof communication equilibria of two-person games are characterized by "information efficiency". Several examples are analyzed, including the Voting Paradox.},
author = {Einy, Ezra and Peleg, Bezalel}
}
@booklet {milchtaich-cgwpp1992,
title = {Congestion Games with Player-Specific Payoffs},
journal = {Discussion Papers},
number = {15},
year = {1992},
month = {10},
publisher = {Games and Economic Behavior 13 (1996), 111-124},
abstract = {A class of non-cooperative games in which the players share a common set of strategies is described. The payoff a player receives for playing a particular strategy depends only on the total number of players playing the same strategy and decreases monotonously with that number in a manner which is specific to the particular player. It is shown that each game in this class possesses at least one Nash equilibrium in pure strategies.},
url = {/files/dp15.pdf},
author = {Igal Milchtaich}
}
@booklet {tijs-tcpfgisf1992,
title = {The Consistency Principle for Games in Strategic Form},
journal = {Discussion Papers},
number = {19},
year = {1992},
month = {11},
publisher = {International Journal of Game Theory 25 (1996), 13-34},
abstract = {We start with giving an axiomatic characterization of the Nash equilibrium (NE) correspondence in terms of consistency, converse consistency, and one-person rationality. Then axiomatizations are given of the strong NE correspondence, the coalition proof NE correspondence and the semi-strong NE. In all these characterizations consistency and suitable variants of converse consistency play a role. Finally, the dominant NE correspondence is characterized. We also indicate how to generalize our results to Bayesian and extensive games.},
author = {Peleg, Bezalel and Tijs, Stef}
}
@booklet {winter-ciairtsfc1992,
title = {Core Implementation and Increasing Returns to Scale for Cooperation},
journal = {Discussion Papers},
number = {23},
year = {1992},
month = {12},
publisher = {Journal of Mathematical Economics 23 (1994), 533-548},
abstract = {In this paper we analyze a simple non-cooperative bargaining model for coalition formation and payoff distribution in games with coalition form. We show that under our bargaining regime a cooperative game is core implementable if and only if it possesses the property of increasing returns to scale for cooperation. Namely, the game is convex. This offers a characterization of a purely cooperative notion by means of its non-cooperative foundations.},
author = {Moldovanu, Benny and Winter, Eyal}
}
@booklet {jose-luisferreira-ceigwucdtp1992,
title = {Credible Equilibria in Games with Utilities Changing During the Play},
journal = {Discussion Papers},
number = {5},
year = {1992},
month = {2},
publisher = {Games and Economic Behavior 10 (1995), 284-317},
abstract = {Whenever one deals with an interactive decision situation of long duration, one has to take into account that priorities of the participants may change during the conflict. In this paper we propose an extensive-form game model to handle such situations and suggest and study a solution concept, called credible equilibrium, which generalizes the concept of Nash equilibrium. We also discuss possible variants to this concept and applications of the model to other types of games.},
url = {/files/dp5.pdf},
author = {Jose-Luis Ferreira and Itzhak Gilboa and Michael Maschler}
}
@booklet {mas-colell-esolgitaa1992,
title = {Egalitarian Solutions of Large Games: II. The Asymptotic Approach},
journal = {Discussion Papers},
number = {2},
year = {1992},
month = {1},
publisher = {Mathematics of Operations Research 20 (1995), 1003-1022},
abstract = {This is the second of two papers developing the theory of Egalitarian solutions for games in coalitional form with non-transferable utility (NTU) and a large number of players. This paper is devoted to the study of the egalitarian solutions of finite games as the number of players increases. We show that these converge to the egalitarian solution of the limit game with a continuum of players as defined in our previous paper. The same convergence holds for the underlying potential functions.},
author = {Hart, Sergiu and Mas-Colell, Andreu}
}
@booklet {volij-ecfeibwi1992,
title = {Epistemic Conditions for Equilibrium in Beliefs Without Independence},
journal = {Discussion Papers},
number = {4},
year = {1992},
month = {1},
publisher = {Journal of Economic Theory 70 (1996), 391-406},
abstract = {Aumann and Brandenburger (1991) describe sufficient conditions on the knowledge of the players in a game for a Nash equilibrium to exist. They assumed, among other things, mutual knowledge of rationality. By rationality of a player, they mean that the action chosen by him maximizes his expected utility, given his beliefs. There is, however, no need to restrict the notion of rationality to expected utility maximization. This paper shows that their result can be generalized to the case where the players{\textquoteright} preferences over uncertain outcomes can be represented by a continuous function, not necessarily linear in the probabilities.},
url = {/files/dp4.pdf},
author = {Volij, Oscar}
}
@booklet {linial-gcpgaoc1992,
title = {Games Computers Play: Game-Theoretic Aspects of Computing},
journal = {Discussion Papers},
number = {6},
year = {1992},
month = {2},
publisher = {Handbook of Game Theory, Vol. II, R. J. Aumann \& S. Hart (eds.), North-Holland (1994), 1340-1395},
abstract = {This is a survey of some connections between game theory and theoretical computer science. The main emphasis is on theories of fault-tolerant computing. The paper is largely self-contained.},
author = {Linial, Nathan}
}
@booklet {neter-haiivhliiadfipj1992,
title = {How Alike Is It Versus How Likely Is It: A Disjunction Fallacy in Probability Judgments},
journal = {Discussion Papers},
number = {20},
year = {1992},
month = {11},
publisher = {Journal of Personality and Social Psychology 65 (1993), 1119-1132},
abstract = {Formally, a conjunction fallacy and a disjunction fallacy cannot be distinguished. Both consist of a violation of the rule that an event cannot be more probable than another event which includes it. Hitherto, only a special kind of violation of this rule has been demonstrated, namely, that people sometimes judge the probability of A \& B to be higher than the probability of A (Tversky \& Kahneman, 1983). This study demonstrates a violation of the rule in a context that justifies the label disjunction fallacy. Subjects received brief case descriptions, and ordered seven categories according to one of four criteria for including the case as a member of the category : 1. probability of membership ; 2. willingness to bet on membership ; 3. inclination to predict membership ; 4. suitability for membership. The category list included nested pairs of categories, such as Brazil and South American country, or Physics and A Natural Science. The more inclusive category was a union of basic level sets like the smaller category. From a normative standpoint, the first two criteria are equivalent, and either ranking a category as more probable than its superordinate, or betting on it rather than on its superordinate, is fallacious. On the other hand, inclination to predict may be guided by the desire to maximize informativeness rather than merely likelihood of being correct, and suitability needs to conform to no formal rule. Hence, with respect to these two criteria, such a ranking pattern is not fallacious. In spite of this crucial difference, subjects in all four groups rendered highly similar judgments, and the ranking of categories higher than their superordinates was not lower when it amounted to a fallacy than when it did not. The results support the representativeness thesis against some alternative interpretations.},
url = {/files/dp20.pdf},
author = {Bar-Hillel, Maya and Neter, Efrat}
}
@booklet {mayabar-hillel-jodj1992,
title = {Judgments of Distributive Justice},
journal = {Discussion Papers},
number = {17},
year = {1992},
month = {9},
publisher = {In Psychological Perspectives on Justice. B. Mellers \& J. Baron (eds.) Cambridge University Press (1993) Ch. 4, 55-84},
abstract = {The basic rule of distributive justice is the proportionality rule, which states that "Distributive justice involves a relationship between ... two persons, P1 and P2 one of whom can be assessed as higher than, or lower than, the other; and their two shares, or ... rewards, R1 and R2. The condition of distributive justice is satisfied when ... : P1/P2=R1/R2". (Homans, 1961). We studied this rule, in survey style, using cases such as the following: "suppose you have 12 grapefruit which you divide between Jones and Smith in as just a manner as possible. How should this be done ?". In our problems, either one or two goods were to be allocated between two recipients who differed on at most one dimension, either needs (e.g., Smith requires more grapefruit than Jones), beliefs (e.g., Smith believes that grapefruit are less nutritious than Jones believes them to be), or tastes (e.g., Smith enjoys grapefruit more than Jones). The results show that it is very hard to be more specific than the general formulation above without being ad hoc. For example, most people wish to allocate proportionately to need, only a minority wish to allocate proportionately to beliefs, and insofar as people wish to take tastes into consideration, they do so in a non-compensatory fashion. In other words, with regard to needs, less efficient extractors are awarded larger shares, but with regard to pleasure, more efficient extractors are awarded larger shares. Since real world distribution problems don{\textquoteright}t come neatly labelled as needs, tastes, etc., it is hard to predict or theorize what would be "just" in them.},
url = {/files/db17.pdf},
author = {Maya Bar-Hillel and Menahem Yaari}
}
@booklet {winter-mrimb1992,
title = {Mechanism Robustness in Multilateral Bargaining},
journal = {Discussion Papers},
number = {7},
year = {1992},
month = {3},
publisher = {Theory and Decision 40 (1996), 131-147},
abstract = {We describe a non-cooperative bargaining model for games in coalition form without transferable utility. In this model random moves determine the order by which the players take their actions. the specific assignment of probability distributions to these chance moves is called the mechanics of the bargaining. Within this framework we examine the relation between the property of mechanism robustness, and coalition stability of the bargaining outcome, by showing that these two properties boil down to be the same.},
author = {Winter, Eyal}
}
@booklet {mas-colell-amonnb1992,
title = {A Model of N-Person Non-Cooperative Bargaining},
journal = {Discussion Papers},
number = {10},
year = {1992},
month = {7},
publisher = {Published as "Bargaining and Value" in Econometrica 64 (1996), 357-380},
abstract = {We present and analyze a model of non-cooperative bargaining among n participants, applied to games in cooperative form. This leads to a unified theory that has as special cases the Shapley value solution in the transferable utility case; the Nash bargaining solution in the pure bargaining case; and finally, the recently introduced Maschler-Owen consistent value solution in the general (non-transferable utility) case.},
author = {Hart, Sergiu and Mas-Colell, Andreu}
}
@booklet {shmida-nsaesfs1992,
title = {Near-Far Search: An Evolutionarily Stable Foraging Strategy},
journal = {Discussion Papers},
number = {18},
year = {1992},
month = {10},
publisher = {Journal of Theoretical Biology 173 (1995), 15-22},
abstract = {This study addresses the momentary rules of foraging behavior on carpet inflorescences. It has long been suggested that patchiness in the distribution of nectar can give an advantage to near-far type of foraging strategies, that is, to foragers which search "near" (in the neighborhood of the last visited flower) as long as the nectar yield is high enough, and go "far" otherwise. Here we show that under certain conditions, such a strategy can be evolutionarily stable. Furthermore, prior patchiness in the nectar distribution is not a necessary condition for the evolutionary stability of a near-far search. It turns out that during near-far foraging, some patchiness is created by the foraging process itself, which the near-far forager can exploit later on. To show the evolutionary stability of near-far search, various foraging strategies were compared, according to two, slightly different optimality criteria : the number of flowers emptied during a fixed length bout, and the number of flowers visited until total extraction of the entire inflorescence. We find that long enough bouts (in the case of a single forager) or a substantial probability of revisits to the same inflorescence (in the case of multiple foragers) are necessary for near-far to be an ESS.},
url = {/files/db18.pdf},
author = {Motro, Uzi and Shmida, Avi}
}
@booklet {mas-colell-aniovap1992,
title = {A Non-Cooperative Interpretation of Value and Potential},
journal = {Discussion Papers},
number = {3},
year = {1992},
month = {1},
publisher = {In R. Selten (ed.) Rational Interaction (1992) Springer-Verlag 83-93},
abstract = {Given a (TU or NTU) game in characteristic form an auxiliary two-person zero sum game is presented whose maximin = minimax value is precisely the potential of the game. In the auxiliary game one of the players tries to buy off the members of the original game by choosing the order in which to approach them, while the other player sets the price of those members so as to make the expense incurred as high as possible.},
author = {Hart, Sergiu and Mas-Colell, Andreu}
}
@booklet {reny-anvocfatc1992,
title = {A Noncooperative View of Coalition Formation and the Core},
journal = {Discussion Papers},
number = {21},
year = {1992},
month = {11},
publisher = {Econometrica 62 (1994), 795-818},
abstract = {Much of the core{\textquoteright}s appeal stems from the intuitive and natural story behind it, the story that first motivated F.Y Edgeworth in 1881. Thus the primary motivation for the core is noncooperative in nature. Nonetheless, the core is not a noncooperative solution concept. This is because, in particular, the possibilities for forming coalitions, and making offers and counteroffers, are not explicitly modeled. In this work, we provide a noncooperative implementation of the core. However, we do not merely implement the core. The nature of the game form employed is designed to reflect the motivating story as accurately as possible. The present results thus provide formal content to the usual intuitive justification for the core. In our view, the core would lose much of its appeal were it not possible to provide such a noncooperative foundation.},
url = {/files/dp21.pdf},
author = {Perry, Motty and Reny, Philip J.}
}
@booklet {shmida-pgnatdofs1992,
title = {Pollination, Gathering Nectar, and the Distribution of Flower Species},
journal = {Discussion Papers},
number = {14},
year = {1992},
month = {10},
publisher = {Journal of Theoretical Biology 175 (1995), 127-138},
abstract = {We present here a model of pollination having one species of bees and several species of flowers. Each flower species is distinguished by its rate of nectar production and the resources it devotes to display. The flowers and bees are assumed to have identical lifetimes that comprise a number of days within a single year. At the start of the year the bees in their naive phase are attracted to flowers according to the relative sizes of the flowers{\textquoteright} displays; however, the bees soon become experienced and continually monitor the amounts of the nectar standing crops of each species, altering their visiting habits over time so that they always tend to visit most frequently the flower species having the largest nectar standing crop. This, in turn, tends to equalize the nectar standing crop across species. From one year to the next the relative abundance of the flower species can change in accordance with the reproductive success of each species. This, in turn, depends upon the number of visits by bees to the flowers of each species, the amount of energy devoted to reproduction, and the relative abundance of each species in the preceding year. The model described below has been programmed so that it is possible to run simulations. We make no attempt to model the absolute number of bees or of flowers, but do assume the ratio of bees to flowers is the same from one season to the next. Within this model systematic deviations by the bees from apparently optimal foraging policies can be seen, due to monitoring by the bees, and also the ability to survive of large display flowers that produce no nectar ("cheaters") can be explained.},
url = {/files/dp14.pdf},
author = {Friedman, James W. and Shmida, Avi}
}
@booklet {hart-opg1992,
title = {On Prize Games},
journal = {Discussion Papers},
number = {8},
year = {1992},
month = {5},
publisher = {Essays in Game Theory, N. Megiddo (ed.), Springer-Verlag (1994), 111-121},
abstract = {We consider the class of hyperplane coalition games (H-games): the feasible set of each coalition is a half-space, with a slope that may vary from one coalition to another. H-games have turned out in various approaches to the value of general non-transferable utility (NTU) games. In this paper we introduce a simple model {\textendash} prize games {\textendash} that generates the hyperplane games. Next, we provide an axiomatization for the Maschler \& Owen (1989) consistent value of H-games.},
author = {Sergiu Hart}
}
@booklet {peleg-sasceatdoteobcwar1992,
title = {Strict and Symmetric Correlated Equilibria Are the Distributions of the ESS{\textquoteright}s of Biological Conflicts with Asymmetric Roles},
journal = {Discussion Papers},
number = {11},
year = {1992},
month = {8},
publisher = {In W. Albers, W. Guth, P. Hammerstein, B. Moldovanu \& E. van Damme (eds.), Understanding Strategic Interaction, Essays in Honor of R. Selten, (1997) Springer-Verlag 149-170},
abstract = {We investigate the ESS{\textquoteright}s of payoff-irrelevant asymmetric animal conflicts in Selten{\textquoteright}s (1980) model. We show that these are determined by the symmetric and strict correlated equilibria of the underlying (symmetric) two-person game. More precisely, the set of distributions (on the strategy space) of ESS{\textquoteright}s coincides with the set of strict and symmetric correlated equilibria (described as distributions). Our result enables us to predict all possible stable payoffs in payoff-irrelevant asymmetric animal conflicts using Aumann{\textquoteright}s correlated equilibria. It also enables us to interpret correlated equilibria as solutions to biological conflicts: Nature supplies the correlation device as a phenotypic conditional behavior.},
author = {Shmida, Avi and Peleg, Bezalel}
}
@booklet {davidbudescu-tgontg1992,
title = {To Guess or Not to Guess},
journal = {Discussion Papers},
number = {13},
year = {1992},
month = {9},
publisher = {Journal of Educational Measurement 30 (1993), 277-291},
abstract = {Multiple choice tests that are scored by formula scoring typically include instructions that discourage guessing. In this paper we look at test taking from the normative and descriptive perspectives of judgment and decision theory. We show that for a rational test taker, whose goal is the maximization of expected score, answering is either superior or equivalent to omitting {\textendash} a fact which follows from the scoring formula. For test takers who are not fully rational, or have goals other than the maximization of expected score, it is very hard to give adequate formula scoring instructions, and the recommendation to answer under partial knowledge is problematic (though generally beneficial). Our analysis derives from a critical look at standard assumptions about the epistemic states, response strategies, and strategic motivations of test takers. In conclusion, we endorse the "number right" scoring rule, which discourages omissions, and is robust against variability in respondent motivations, limitations in judgments of uncertainty, and item vagaries.},
url = {/files/2guess-13.pdf},
author = {David Budescu, Maya Bar-Hillel}
}